author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
commit    38b7c80217c4e72b1d8988eb1e60bb6e77334114 (patch)
tree      356e9fd3762877d07cde52d21e77070aeff7e789 /ansible_collections/community/aws
parent    Adding upstream version 7.7.0+dfsg. (diff)
Adding upstream version 9.4.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
545 files changed, 27737 insertions, 24846 deletions
diff --git a/ansible_collections/community/aws/plugins/modules/s3_bucket_info.py b/ansible_collections/amazon/aws/plugins/modules/s3_bucket_info.py
index 541a02b0f..b382e5eeb 100644
--- a/ansible_collections/community/aws/plugins/modules/s3_bucket_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_bucket_info.py
@@ -1,17 +1,14 @@
 #!/usr/bin/python
-"""
-Copyright (c) 2017 Ansible Project
-GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: s3_bucket_info
 version_added: 1.0.0
+version_added_collection: community.aws
 author:
   - "Gerben Geijteman (@hyperized)"
 short_description: Lists S3 buckets in AWS
@@ -39,70 +36,77 @@ options:
       - You can limit buckets by using the I(name) or I(name_filter) option.
     suboptions:
       bucket_accelerate_configuration:
-        description: Retrive S3 accelerate configuration.
+        description: Retrieve S3 accelerate configuration.
         type: bool
         default: False
       bucket_location:
-        description: Retrive S3 bucket location.
+        description: Retrieve S3 bucket location.
         type: bool
         default: False
       bucket_replication:
-        description: Retrive S3 bucket replication.
+        description: Retrieve S3 bucket replication.
         type: bool
         default: False
       bucket_acl:
-        description: Retrive S3 bucket ACLs.
+        description: Retrieve S3 bucket ACLs.
         type: bool
         default: False
       bucket_logging:
-        description: Retrive S3 bucket logging.
+        description: Retrieve S3 bucket logging.
         type: bool
         default: False
       bucket_request_payment:
-        description: Retrive S3 bucket request payment.
+        description: Retrieve S3 bucket request payment.
         type: bool
         default: False
       bucket_tagging:
-        description: Retrive S3 bucket tagging.
+        description: Retrieve S3 bucket tagging.
         type: bool
         default: False
       bucket_cors:
-        description: Retrive S3 bucket CORS configuration.
+        description: Retrieve S3 bucket CORS configuration.
         type: bool
         default: False
       bucket_notification_configuration:
-        description: Retrive S3 bucket notification configuration.
+        description: Retrieve S3 bucket notification configuration.
         type: bool
         default: False
       bucket_encryption:
-        description: Retrive S3 bucket encryption.
+        description: Retrieve S3 bucket encryption.
         type: bool
         default: False
       bucket_ownership_controls:
         description:
-          - Retrive S3 ownership controls.
+          - Retrieve S3 ownership controls.
         type: bool
         default: False
       bucket_website:
-        description: Retrive S3 bucket website.
+        description: Retrieve S3 bucket website.
         type: bool
         default: False
       bucket_policy:
-        description: Retrive S3 bucket policy.
+        description: Retrieve S3 bucket policy.
         type: bool
         default: False
       bucket_policy_status:
-        description: Retrive S3 bucket policy status.
+        description: Retrieve S3 bucket policy status.
         type: bool
         default: False
       bucket_lifecycle_configuration:
-        description: Retrive S3 bucket lifecycle configuration.
+        description: Retrieve S3 bucket lifecycle configuration.
         type: bool
         default: False
       public_access_block:
-        description: Retrive S3 bucket public access block.
+        description: Retrieve S3 bucket public access block.
+        type: bool
+        default: False
+      bucket_versioning:
+        description:
+          - Retrieve the versioning state of a bucket.
+          - To retrieve the versioning state of a bucket, you must be the bucket owner.
         type: bool
         default: False
+        version_added: 7.3.0
     type: dict
     version_added: 1.4.0
   transform_location:
@@ -114,22 +118,22 @@ options:
     default: False
     version_added: 1.4.0
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
-'''
+"""

-EXAMPLES = '''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 # Note: Only AWS S3 is currently supported

 # Lists all S3 buckets
-- community.aws.s3_bucket_info:
+- amazon.aws.s3_bucket_info:
   register: result

 # Retrieve detailed bucket information
-- community.aws.s3_bucket_info:
+- amazon.aws.s3_bucket_info:
     # Show only buckets with name matching
     name_filter: your.testing
     # Choose facts to retrieve
@@ -157,9 +161,9 @@ EXAMPLES = '''
 - name: List buckets
   ansible.builtin.debug:
     msg: "{{ result['buckets'] }}"
-'''
+"""

-RETURN = '''
+RETURN = r"""
 bucket_list:
   description: "List of buckets"
   returned: always
@@ -399,17 +403,27 @@ bucket_list:
       returned: always
       type: str
       sample: https
-'''
+    bucket_versioning:
+      description:
+        - The versioning state of the bucket.
+        - This will also specify whether MFA delete is enabled in the bucket versioning configuration,
+          but only if the bucket has been configured with MFA delete.
+      returned: when I(bucket_facts=true) and I(bucket_versioning=true)
+      type: dict
+      sample: { 'Status': 'Enabled' }
+      version_added: 7.2.0
+"""

 try:
     import botocore
 except ImportError:
     pass  # Handled by AnsibleAWSModule

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict


 def get_bucket_list(module, connection, name="", name_filter=""):
@@ -426,18 +440,18 @@ def get_bucket_list(module, connection, name="", name_filter=""):

     # Get all buckets
     try:
-        buckets = camel_dict_to_snake_dict(connection.list_buckets())['buckets']
+        buckets = camel_dict_to_snake_dict(connection.list_buckets())["buckets"]
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
         module.fail_json_aws(err_code, msg="Failed to list buckets")

     # Filter buckets if requested
     if name_filter:
         for bucket in buckets:
-            if name_filter in bucket['name']:
+            if name_filter in bucket["name"]:
                 filtered_buckets.append(bucket)
     elif name:
         for bucket in buckets:
-            if name == bucket['name']:
+            if name == bucket["name"]:
                 filtered_buckets.append(bucket)

     # Return proper list (filtered or all)
@@ -450,12 +464,12 @@ def get_bucket_list(module, connection, name="", name_filter=""):

 def get_buckets_facts(connection, buckets, requested_facts, transform_location):
     """
-    Retrive additional information about S3 buckets
+    Retrieve additional information about S3 buckets
     """
     full_bucket_list = []
-    # Iterate over all buckets and append retrived facts to bucket
+    # Iterate over all buckets and append retrieved facts to bucket
     for bucket in buckets:
-        bucket.update(get_bucket_details(connection, bucket['name'], requested_facts, transform_location))
+        bucket.update(get_bucket_details(connection, bucket["name"], requested_facts, transform_location))
         full_bucket_list.append(bucket)

     return full_bucket_list
@@ -469,14 +483,14 @@ def get_bucket_details(connection, name, requested_facts, transform_location):

     for key in requested_facts:
         if requested_facts[key]:
-            if key == 'bucket_location':
+            if key == "bucket_location":
                 all_facts[key] = {}
                 try:
                     all_facts[key] = get_bucket_location(name, connection, transform_location)
                 # we just pass on error - error means that resources is undefined
                 except botocore.exceptions.ClientError:
                     pass
-            elif key == 'bucket_tagging':
+            elif key == "bucket_tagging":
                 all_facts[key] = {}
                 try:
                     all_facts[key] = get_bucket_tagging(name, connection)
@@ -494,7 +508,7 @@ def get_bucket_details(connection, name, requested_facts, transform_location):
     return all_facts


-@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
 def get_bucket_location(name, connection, transform_location=False):
     """
     Get bucket location and optionally transform 'null' to 'us-east-1'
@@ -504,16 +518,16 @@ def get_bucket_location(name, connection, transform_location=False):

     # Replace 'null' with 'us-east-1'?
     if transform_location:
         try:
-            if not data['LocationConstraint']:
-                data['LocationConstraint'] = 'us-east-1'
+            if not data["LocationConstraint"]:
+                data["LocationConstraint"] = "us-east-1"
         except KeyError:
             pass
     # Strip response metadata (not needed)
-    data.pop('ResponseMetadata', None)
+    data.pop("ResponseMetadata", None)
     return data


-@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
 def get_bucket_tagging(name, connection):
     """
     Get bucket tags and transform them using `boto3_tag_list_to_ansible_dict` function
     """
     data = connection.get_bucket_tagging(Bucket=name)

     try:
-        bucket_tags = boto3_tag_list_to_ansible_dict(data['TagSet'])
+        bucket_tags = boto3_tag_list_to_ansible_dict(data["TagSet"])
         return bucket_tags
     except KeyError:
         # Strip response metadata (not needed)
-        data.pop('ResponseMetadata', None)
+        data.pop("ResponseMetadata", None)
         return data


-@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
 def get_bucket_property(name, connection, get_api_name):
     """
     Get bucket property
@@ -539,7 +553,7 @@ def get_bucket_property(name, connection, get_api_name):
     data = api_function(Bucket=name)

     # Strip response metadata (not needed)
-    data.pop('ResponseMetadata', None)
+    data.pop("ResponseMetadata", None)
     return data

@@ -549,27 +563,31 @@ def main():
     """
     :return:
     """
     argument_spec = dict(
-        name=dict(type='str', default=""),
-        name_filter=dict(type='str', default=""),
-        bucket_facts=dict(type='dict', options=dict(
-            bucket_accelerate_configuration=dict(type='bool', default=False),
-            bucket_acl=dict(type='bool', default=False),
-            bucket_cors=dict(type='bool', default=False),
-            bucket_encryption=dict(type='bool', default=False),
-            bucket_lifecycle_configuration=dict(type='bool', default=False),
-            bucket_location=dict(type='bool', default=False),
-            bucket_logging=dict(type='bool', default=False),
-            bucket_notification_configuration=dict(type='bool', default=False),
-            bucket_ownership_controls=dict(type='bool', default=False),
-            bucket_policy=dict(type='bool', default=False),
-            bucket_policy_status=dict(type='bool', default=False),
-            bucket_replication=dict(type='bool', default=False),
-            bucket_request_payment=dict(type='bool', default=False),
-            bucket_tagging=dict(type='bool', default=False),
-            bucket_website=dict(type='bool', default=False),
-            public_access_block=dict(type='bool', default=False),
-        )),
-        transform_location=dict(type='bool', default=False)
+        name=dict(type="str", default=""),
+        name_filter=dict(type="str", default=""),
+        bucket_facts=dict(
+            type="dict",
+            options=dict(
+                bucket_accelerate_configuration=dict(type="bool", default=False),
+                bucket_acl=dict(type="bool", default=False),
+                bucket_cors=dict(type="bool", default=False),
+                bucket_encryption=dict(type="bool", default=False),
+                bucket_lifecycle_configuration=dict(type="bool", default=False),
+                bucket_location=dict(type="bool", default=False),
+                bucket_logging=dict(type="bool", default=False),
+                bucket_notification_configuration=dict(type="bool", default=False),
+                bucket_ownership_controls=dict(type="bool", default=False),
+                bucket_policy=dict(type="bool", default=False),
+                bucket_policy_status=dict(type="bool", default=False),
+                bucket_replication=dict(type="bool", default=False),
+                bucket_request_payment=dict(type="bool", default=False),
+                bucket_tagging=dict(type="bool", default=False),
+                bucket_website=dict(type="bool", default=False),
+                public_access_block=dict(type="bool", default=False),
+                bucket_versioning=dict(type="bool", default=False),
+            ),
+        ),
+        transform_location=dict(type="bool", default=False),
     )

     # Ensure we have an empty dict
@@ -577,11 +595,15 @@

     # Define mutually exclusive options
     mutually_exclusive = [
-        ['name', 'name_filter']
+        ["name", "name_filter"],
     ]

     # Including ec2 argument spec
-    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive)
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=mutually_exclusive,
+    )

     # Get parameters
     name = module.params.get("name")
@@ -592,29 +614,29 @@ def main():

     # Set up connection
     connection = {}
     try:
-        connection = module.client('s3')
+        connection = module.client("s3")
     except (connection.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
-        module.fail_json_aws(err_code, msg='Failed to connect to AWS')
+        module.fail_json_aws(err_code, msg="Failed to connect to AWS")

     # Get basic bucket list (name + creation date)
     bucket_list = get_bucket_list(module, connection, name, name_filter)

     # Add information about name/name_filter to result
     if name:
-        result['bucket_name'] = name
+        result["bucket_name"] = name
     elif name_filter:
-        result['bucket_name_filter'] = name_filter
+        result["bucket_name_filter"] = name_filter

     # Gather detailed information about buckets if requested
     bucket_facts = module.params.get("bucket_facts")
     if bucket_facts:
-        result['buckets'] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location)
+        result["buckets"] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location)
     else:
-        result['buckets'] = bucket_list
+        result["buckets"] = bucket_list

     module.exit_json(msg="Retrieved s3 info.", **result)


 # MAIN
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
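Editor's note: for playbooks picking up this change, a minimal usage sketch of the migrated module with the new ``bucket_versioning`` fact follows; the name prefix and registered variable are illustrative, not taken from the diff.

```yaml
- name: Retrieve versioning state for matching buckets
  amazon.aws.s3_bucket_info:
    name_filter: my-prefix-        # hypothetical substring filter
    bucket_facts:
      bucket_versioning: true      # new option per the documentation above
    transform_location: true
  register: versioning_info

- name: Show versioning status per bucket
  ansible.builtin.debug:
    msg: "{{ item.name }}: {{ item.bucket_versioning.Status | default('Never enabled') }}"
  loop: "{{ versioning_info.buckets }}"
```

Because ``name`` and ``name_filter`` are mutually exclusive in the argument spec above, only one of them should be supplied per task.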
diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_access_key/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/aliases
index ffceccfcc..ffceccfcc 100644
--- a/ansible_collections/community/aws/tests/integration/targets/iam_access_key/aliases
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/aliases
diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/aliases
index 2da398045..2da398045 100644
--- a/ansible_collections/community/aws/tests/integration/targets/iam_group/aliases
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/aliases
diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/files/deny-all.json
index 3d324b9b9..3d324b9b9 100644
--- a/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all.json
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/files/deny-all.json
diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-assume.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/files/deny-assume.json
index 73e877158..73e877158 100644
--- a/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-assume.json
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/files/deny-assume.json
diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_managed_policy/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/aliases
index 839bd014b..839bd014b 100644
--- a/ansible_collections/community/aws/tests/integration/targets/iam_managed_policy/aliases
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/aliases
diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/aliases
index 140a2f2dc..140a2f2dc 100644
--- a/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/aliases
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/aliases
diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/aliases
index 483c86115..483c86115 100644
--- a/ansible_collections/community/aws/tests/integration/targets/iam_role/aliases
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/aliases
diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-a.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-a.json
index ae62fd197..ae62fd197 100644
--- a/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-a.json
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-a.json
diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-b.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-b.json
index 3a4704a46..3a4704a46 100644
--- a/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-b.json
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-b.json
diff --git a/ansible_collections/community/aws/tests/integration/targets/aws_region_info/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/aliases
index 4ef4b2067..4ef4b2067 100644
--- a/ansible_collections/community/aws/tests/integration/targets/aws_region_info/aliases
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/aliases
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/aliases
index 4ef4b2067..4ef4b2067 100644
--- a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/aliases
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/aliases
diff --git a/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/aliases b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/aliases
index 4ef4b2067..4ef4b2067 100644
--- a/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/aliases
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/aliases
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/tasks/bucket_ownership_controls.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/bucket_ownership_controls.yml
index 3acd99cf6..a60e58067 100644
--- a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/tasks/bucket_ownership_controls.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/bucket_ownership_controls.yml
@@ -1,23 +1,21 @@
 ---
 - name: Get S3 bucket ownership controls
-  aws_s3_bucket_info:
+  amazon.aws.s3_bucket_info:
     name_filter: "{{ name_pattern }}"
     bucket_facts:
       bucket_ownership_controls: true
     transform_location: true
   register: bucket_list
-
 - name: Assert that buckets list contains requested bucket facts
-  assert:
+  ansible.builtin.assert:
     that:
      - item.name is search(name_pattern)
      - item.bucket_ownership_controls is defined
   loop: "{{ bucket_list.buckets }}"
   loop_control:
     label: "{{ item.name }}"
-
 - name: Get complex S3 bucket list (including ownership controls)
-  aws_s3_bucket_info:
+  amazon.aws.s3_bucket_info:
     name_filter: "{{ name_pattern }}"
     bucket_facts:
       bucket_accelerate_configuration: true
@@ -38,9 +36,8 @@
       public_access_block: true
     transform_location: true
   register: bucket_list
-
 - name: Assert that buckets list contains requested bucket facts
-  assert:
+  ansible.builtin.assert:
     that:
      - item.name is search(name_pattern)
      - item.bucket_accelerate_configuration is defined
@@ -62,9 +59,8 @@
   loop: "{{ bucket_list.buckets }}"
   loop_control:
     label: "{{ item.name }}"
-
 - name: Assert that retrieved bucket facts contains valid data
-  assert:
+  ansible.builtin.assert:
     that:
      - item.bucket_acl.Owner is defined
      - item.bucket_tagging.snake_case is defined
diff --git a/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2 b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2
index 559562fd9..559562fd9 100644
--- a/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2
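Editor's note: a matching integration-test task for the new ``bucket_versioning`` fact could follow the same pattern as the ownership-controls tasks above. This is a sketch only; ``name_pattern`` is assumed to be provided by the surrounding test setup.

```yaml
- name: Get S3 bucket versioning state
  amazon.aws.s3_bucket_info:
    name_filter: "{{ name_pattern }}"
    bucket_facts:
      bucket_versioning: true
  register: bucket_list

- name: Assert that the versioning fact was returned
  ansible.builtin.assert:
    that:
      - item.name is search(name_pattern)
      - item.bucket_versioning is defined
  loop: "{{ bucket_list.buckets }}"
  loop_control:
    label: "{{ item.name }}"
```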
diff --git a/ansible_collections/community/aws/.github/workflows/all_green_ckeck.yml b/ansible_collections/community/aws/.github/workflows/all_green_ckeck.yml
new file mode 100644
index 000000000..daa3e8bdf
--- /dev/null
+++ b/ansible_collections/community/aws/.github/workflows/all_green_ckeck.yml
@@ -0,0 +1,42 @@
+---
+name: all_green
+
+concurrency:
+  group: ${{ github.head_ref }}
+  cancel-in-progress: true
+
+on:  # yamllint disable-line rule:truthy
+  pull_request:
+    types:
+      - opened
+      - reopened
+      - labeled
+      - unlabeled
+      - synchronize
+    branches:
+      - main
+      - 'stable-*'
+    tags:
+      - '*'
+
+jobs:
+  changelog-and-linters:
+    uses: ./.github/workflows/changelog_and_linters.yml  # use the callable changelog_and_linters job to run tests
+  sanity:
+    uses: ./.github/workflows/sanity.yml  # use the callable sanity job to run tests
+  units:
+    uses: ./.github/workflows/units.yml  # use the callable units job to run tests
+  all_green:
+    if: ${{ always() }}
+    needs:
+      - changelog-and-linters
+      - sanity
+      - units
+    runs-on: ubuntu-latest
+    steps:
+      - run: >-
+          python -c "assert set([
+          '${{ needs.changelog-and-linters.result }}',
+          '${{ needs.sanity.result }}',
+          '${{ needs.units.result }}'
+          ]) == {'success'}"
diff --git a/ansible_collections/community/aws/.github/workflows/changelog_and_linters.yml b/ansible_collections/community/aws/.github/workflows/changelog_and_linters.yml
new file mode 100644
index 000000000..4029505d0
--- /dev/null
+++ b/ansible_collections/community/aws/.github/workflows/changelog_and_linters.yml
@@ -0,0 +1,13 @@
+---
+name: changelog and linters
+
+on: [workflow_call]  # allow this workflow to be called from other workflows
+
+jobs:
+  changelog:
+    uses: ansible-network/github_actions/.github/workflows/changelog.yml@main
+  linters:
+    uses: ansible-network/github_actions/.github/workflows/tox.yml@main
+    with:
+      envname: ""
+      labelname: "lint"
diff --git a/ansible_collections/community/aws/.github/workflows/galaxy-importer.yml b/ansible_collections/community/aws/.github/workflows/galaxy-importer.yml
new file mode 100644
index 000000000..721f7476e
--- /dev/null
+++ b/ansible_collections/community/aws/.github/workflows/galaxy-importer.yml
@@ -0,0 +1,12 @@
+name: Galaxy Importer
+on:
+  push:
+    branches:
+      - main
+      - stable-*
+  pull_request:
+  schedule:
+    - cron: '0 13 * * *'
+jobs:
+  importer:
+    uses: ansible-network/github_actions/.github/workflows/galaxy_importer.yml@main
diff --git a/ansible_collections/community/aws/.github/workflows/release-manual.yml b/ansible_collections/community/aws/.github/workflows/release-manual.yml
new file mode 100644
index 000000000..19f6dc8d6
--- /dev/null
+++ b/ansible_collections/community/aws/.github/workflows/release-manual.yml
@@ -0,0 +1,35 @@
+name: Generate GitHub Release (manual trigger)
+concurrency:
+  group: release-${{ github.head_ref }}
+  cancel-in-progress: true
+on:
+  workflow_dispatch:
+    inputs:
+      release:
+        required: true
+        description: 'Release to generate'
+        type: string
+
+jobs:
+  generate-release-log:
+    permissions:
+      contents: read
+    runs-on: ubuntu-latest
+    steps:
+      - name: Generate Release Log
+        uses: ansible-collections/amazon.aws/.github/actions/ansible_release_log@main
+        with:
+          release: ${{ inputs.release }}
+
+  perform-release:
+    permissions:
+      contents: write
+    runs-on: ubuntu-latest
+    needs:
+      - generate-release-log
+    steps:
+      - name: Generate Release
+        uses: ansible-collections/amazon.aws/.github/actions/ansible_release_tag@main
+        with:
+          release: ${{ inputs.release }}
+          collection-name: community.aws
diff --git a/ansible_collections/community/aws/.github/workflows/release-tag.yml b/ansible_collections/community/aws/.github/workflows/release-tag.yml
new file mode 100644
index 000000000..cfb6c2838
--- /dev/null
+++ b/ansible_collections/community/aws/.github/workflows/release-tag.yml
@@ -0,0 +1,32 @@
+name: Generate GitHub Release
+concurrency:
+  group: release-${{ github.head_ref }}
+  cancel-in-progress: true
+on:
+  push:
+    tags:
+      - '*'
+
+jobs:
+  generate-release-log:
+    permissions:
+      contents: read
+    runs-on: ubuntu-latest
+    steps:
+      - name: Generate Release Log
+        uses: ansible-collections/amazon.aws/.github/actions/ansible_release_log@main
+        with:
+          release: ${{ github.ref_name }}
+
+  perform-release:
+    permissions:
+      contents: write
+    runs-on: ubuntu-latest
+    needs:
+      - generate-release-log
+    steps:
+      - name: Generate Release
+        uses: ansible-collections/amazon.aws/.github/actions/ansible_release_tag@main
+        with:
+          release: ${{ github.ref_name }}
+          collection-name: community.aws
diff --git a/ansible_collections/community/aws/.github/workflows/sanity.yml b/ansible_collections/community/aws/.github/workflows/sanity.yml
new file mode 100644
index 000000000..161dabfe2
--- /dev/null
+++ b/ansible_collections/community/aws/.github/workflows/sanity.yml
@@ -0,0 +1,10 @@
+---
+name: sanity tests
+
+on: [workflow_call]  # allow this workflow to be called from other workflows
+
+jobs:
+  sanity:
+    uses: ansible-network/github_actions/.github/workflows/sanity.yml@main
+    with:
+      matrix_include: "[]"
diff --git a/ansible_collections/community/aws/.github/workflows/units.yml b/ansible_collections/community/aws/.github/workflows/units.yml
new file mode 100644
index 000000000..7dddcc610
--- /dev/null
+++ b/ansible_collections/community/aws/.github/workflows/units.yml
@@ -0,0 +1,10 @@
+---
+name: unit tests
+
+on: [workflow_call]  # allow this workflow to be called from other workflows
+
+jobs:
+  unit-source:
+    uses: ansible-network/github_actions/.github/workflows/unit_source.yml@main
+    with:
+      collection_pre_install: '-r source/tests/unit/requirements.yml'
diff --git a/ansible_collections/community/aws/.github/workflows/update-variables.yml b/ansible_collections/community/aws/.github/workflows/update-variables.yml
new file mode 100644
index 000000000..f92f77cc6
--- /dev/null
+++ b/ansible_collections/community/aws/.github/workflows/update-variables.yml
@@ -0,0 +1,17 @@
+---
+name: update collection variables
+
+concurrency:
+  group: '${{ github.workflow }} @ ${{ github.sha }}'
+  cancel-in-progress: true
+
+on:
+  push:
+    branches:
+      - main
+      - 'stable-*'
+  pull_request_target:
+
+jobs:
+  update-variables:
+    uses: ansible-network/github_actions/.github/workflows/update_aws_variables.yml@main
diff --git a/ansible_collections/community/aws/.gitignore b/ansible_collections/community/aws/.gitignore
index 3b4462815..ed1c302f0 100644
--- a/ansible_collections/community/aws/.gitignore
+++ b/ansible_collections/community/aws/.gitignore
@@ -1,7 +1,7 @@
 # Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
 # Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
-
+tests/integration/inventory

 ### dotenv ###
 .env
diff --git a/ansible_collections/community/aws/.yamllint b/ansible_collections/community/aws/.yamllint
new file mode 100644
index 000000000..ac5297cdf
--- /dev/null
+++ b/ansible_collections/community/aws/.yamllint
@@ -0,0 +1,15 @@
+---
+rules:
+  indentation:
+    ignore: &default_ignores |
+      # automatically generated, we can't control it
+      changelogs/changelog.yaml
+      # Will be gone when we release and automatically reformatted
+      changelogs/fragments/*
+  document-start:
+    ignore: *default_ignores
+  line-length:
+    ignore: *default_ignores
+    max: 160
+
+ignore-from-file: .gitignore
diff --git a/ansible_collections/community/aws/CHANGELOG.rst b/ansible_collections/community/aws/CHANGELOG.rst
index 7b5761863..651b7c763 100644
--- a/ansible_collections/community/aws/CHANGELOG.rst
+++ b/ansible_collections/community/aws/CHANGELOG.rst
@@ -5,6 +5,200 @@ community.aws Release Notes

 .. contents:: Topics

+v7.1.0
+======
+
+Release Summary
+---------------
+
+This release includes new features for the ``cloudfront_distribution`` and ``mq_broker`` modules, as well as a bugfix for the ``aws_ssm`` connection plugin needed when connecting to hosts with Bash 5.1.0 and later.
+
+Minor Changes
+-------------
+
+- aws_ssm - Updated the documentation to explicitly state that an S3 bucket is required, the behavior of the files in that bucket, and requirements around that (https://github.com/ansible-collections/community.aws/issues/1775).
+- cloudfront_distribution - added support for ``cache_policy_id`` and ``origin_request_policy_id`` for behaviors (https://github.com/ansible-collections/community.aws/pull/1589)
+- mq_broker - add support to wait for broker state via ``wait`` and ``wait_timeout`` parameter values; see the sketch after this release section (https://github.com/ansible-collections/community.aws/pull/1879).
+
+Bugfixes
+--------
+
+- aws_ssm - disable ``enable-bracketed-paste`` to fix an issue with Amazon Linux 2023 and other OSes (https://github.com/ansible-collections/community.aws/issues/1756)
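Editor's note: a hedged sketch of the new ``mq_broker`` wait behaviour referenced in the Minor Changes above. The broker name is hypothetical, and the other parameters a real broker needs (engine type, instance type, networking) are omitted for brevity.

```yaml
- name: Create an MQ broker and wait until it reaches a stable state
  community.aws.mq_broker:
    broker_name: example-broker   # hypothetical name
    state: present
    wait: true                    # block until the broker state settles
    wait_timeout: 900             # give up after 15 minutes
```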
+
+v7.0.0
+======
+
+Release Summary
+---------------
+
+This release includes some new features, bugfixes and breaking changes. Several modules have been migrated to amazon.aws and the Fully Qualified Collection Name for these modules needs to be updated. The community.aws collection has dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/), support for Python less than 3.7 by this collection was deprecated in release 6.0.0 and removed in release 7.0.0 (https://github.com/ansible-collections/amazon.aws/pull/1763).
+
+Minor Changes
+-------------
+
+- api_gateway - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+- api_gateway_info - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+- community.aws collection - apply isort code formatting to ensure consistent formatting of code (https://github.com/ansible-collections/community.aws/pull/1962)
+- ecs_taskdefinition - Add parameter ``runtime_platform`` (https://github.com/ansible-collections/community.aws/issues/1891).
+- eks_nodegroup - ensure wait also waits for deletion to complete when ``wait==True`` (https://github.com/ansible-collections/community.aws/pull/1994).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- The community.aws collection has dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1763).
+- aws_region_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_region_info``.
+- aws_s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_s3_bucket_info``.
+- community.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/), support for Python less than 3.7 by this collection was deprecated in release 6.0.0 and removed in release 7.0.0 (https://github.com/ansible-collections/amazon.aws/pull/1763).
+- iam_access_key - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key``.
+- iam_access_key_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key_info``.
+- iam_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_group`` (https://github.com/ansible-collections/community.aws/pull/1945).
+- iam_managed_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_managed_policy`` (https://github.com/ansible-collections/community.aws/pull/1954).
+- iam_mfa_device_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_mfa_device_info`` (https://github.com/ansible-collections/community.aws/pull/1953).
+- iam_password_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_password_policy``.
+- iam_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/community.aws/pull/1948).
+- iam_role_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role_info`` (https://github.com/ansible-collections/community.aws/pull/1948).
+- s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.s3_bucket_info``.
+- sts_assume_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.sts_assume_role``.
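Editor's note: as a concrete illustration of the porting guide entries above, updating a playbook for a migrated module is purely a rename of the Fully Qualified Collection Name.

```yaml
# Before (community.aws 6.x and earlier)
- name: List S3 buckets
  community.aws.s3_bucket_info:
  register: result

# After (community.aws 7.0.0; the module now lives in amazon.aws)
- name: List S3 buckets
  amazon.aws.s3_bucket_info:
  register: result
```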
+
+Bugfixes
+--------
+
+- mq_broker - ensure broker is created with ``tags`` when passed (https://github.com/ansible-collections/community.aws/issues/1832).
+- opensearch - Don't try to read a non-existing key from the domain config (https://github.com/ansible-collections/community.aws/pull/1910).
+
+v6.2.0
+======
+
+Release Summary
+---------------
+
+This release includes some new features for the ``community.aws.ec2_vpc_vpn`` and ``community.aws.api_gateway`` modules.
+
+Minor Changes
+-------------
+
+- api_gateway - add support for parameters ``name``, ``lookup``, ``tags`` and ``purge_tags`` (https://github.com/ansible-collections/community.aws/pull/1845).
+- ec2_vpc_vpn - add support for connecting VPNs to a transit gateway (https://github.com/ansible-collections/community.aws/pull/1877).
+
+Bugfixes
+--------
+
+- Remove ``apigateway`` and ``apigateway_deployment`` from meta/runtime.yml (https://github.com/ansible-collections/community.aws/pull/1905).
+
+v6.1.0
+======
+
+Release Summary
+---------------
+
+This release brings a new inventory plugin, some new features, and several bugfixes.
+
+Minor Changes
+-------------
+
+- dynamodb_table - added waiter when updating indexes to avoid concurrency issues (https://github.com/ansible-collections/community.aws/pull/1866).
+- dynamodb_table - increased default timeout based on time to update indexes in CI (https://github.com/ansible-collections/community.aws/pull/1866).
+- iam_group - refactored ARN validation handling (https://github.com/ansible-collections/community.aws/pull/1848).
+- iam_role - refactored ARN validation handling (https://github.com/ansible-collections/community.aws/pull/1848).
+- sns_topic - refactored ARN validation handling (https://github.com/ansible-collections/community.aws/pull/1848).
+
+Bugfixes
+--------
+
+- batch_compute_environment - fixed incorrect handling of Gov Cloud ARNs in ``compute_environment_name`` parameter (https://github.com/ansible-collections/community.aws/issues/1846).
+- cloudfront_distribution - the origins now recognise S3 domains that include a region part (https://github.com/ansible-collections/community.aws/issues/1819).
+- cloudfront_distribution - no longer crashes when waiting for completion of creation (https://github.com/ansible-collections/community.aws/issues/255).
+- cloudfront_distribution - now honours the ``enabled`` setting (https://github.com/ansible-collections/community.aws/issues/1823).
+- dynamodb_table - secondary indexes are now created (https://github.com/ansible-collections/community.aws/issues/1825).
+- ec2_launch_template - fixed incorrect handling of Gov Cloud ARNs in ``compute_environment_name`` parameter (https://github.com/ansible-collections/community.aws/issues/1846).
+- elasticache_info - remove hard coded use of ``aws`` partition (https://github.com/ansible-collections/community.aws/issues/1846).
+- iam_role - fixed incorrect rejection of Gov Cloud ARNs in ``boundary`` parameter (https://github.com/ansible-collections/community.aws/issues/1846).
+- msk_cluster - remove hard coded use of ``aws`` partition (https://github.com/ansible-collections/community.aws/issues/1846).
+- redshift - fixed hard coded use of ``aws`` partition (https://github.com/ansible-collections/community.aws/issues/1846).
+
+New Plugins
+-----------
+
+Inventory
+~~~~~~~~~
+
+- aws_mq - MQ broker inventory source
+
+v6.0.0
+======
+
+Release Summary
+---------------
+
+This release brings some new plugins and features.
+Several bugfixes, breaking changes and deprecated features are also included.
+The community.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``.
+Support for Python 3.6 has also been dropped.
+
+
+Minor Changes
+-------------
+
+- The ``black`` code formatter has been run across the collection to improve code consistency (https://github.com/ansible-collections/community.aws/pull/1784).
+- aws_config_delivery_channel - add support for encrypted objects in S3 via KMS key (https://github.com/ansible-collections/community.aws/pull/1786).
+- aws_ssm - Updated the documentation to explicitly mention that the ``ansible_user`` and ``remote_user`` variables are not supported by the plugin (https://github.com/ansible-collections/community.aws/pull/1682).
+- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/community.aws/pull/1810).
+- cloudfront_distribution - add ``http3`` support via parameter value ``http2and3`` for parameter ``http_version`` (https://github.com/ansible-collections/community.aws/pull/1753).
+- cloudfront_distribution - add ``origin_shield`` options (https://github.com/ansible-collections/community.aws/pull/1557).
+- cloudfront_distribution - documented ``connection_attempts`` and ``connection_timeout``; the module was already capable of using them.
+- community.aws - updated document fragments based on changes in amazon.aws (https://github.com/ansible-collections/community.aws/pull/1738).
+- community.aws - updated imports based on changes in amazon.aws (https://github.com/ansible-collections/community.aws/pull/1738).
+- ecs_ecr - use ``compare_policies`` when comparing lifecycle policies instead of naive ``sort_json_policy_dict`` comparisons (https://github.com/ansible-collections/community.aws/pull/1551).
+- elasticache - Use the ``cache.t3.small`` node type in the example; ``cache.m1.small`` is now deprecated.
+- minor code fixes and enable integration tests for modules cloudfront_distribution, cloudfront_invalidation and cloudfront_origin_access_identity (https://github.com/ansible-collections/community.aws/pull/1596).
+- module_utils.botocore - Add Ansible AWS User-Agent identification (https://github.com/ansible-collections/community.aws/pull/1632).
+- wafv2_rule_group_info - remove unused and deprecated ``state`` parameter (https://github.com/ansible-collections/community.aws/pull/1555).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- The community.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/community.aws/pull/1743).
+- aws_ssm - the AWS SSM plugin was incorrectly prepending ``sudo`` to most commands. This behaviour was incorrect and has been removed. To execute commands as a specific user, including the ``root`` user, the ``become`` and ``become_user`` directives should be used. See the `Ansible documentation for more information <https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_privilege_escalation.html>`_ (https://github.com/ansible-collections/community.aws/issues/853).
+- codebuild_project - ``tags`` parameter now accepts a dict representing the tags, rather than the boto3 format; see the sketch after this list (https://github.com/ansible-collections/community.aws/pull/1643).
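Editor's note: to make the ``codebuild_project`` tagging change concrete, a hedged before/after sketch of the ``tags`` value. The tag names are illustrative, and the old list format mirrored the boto3 CodeBuild API.

```yaml
# Old boto3-style format (list of key/value mappings), accepted before 6.0.0
tags:
  - key: environment
    value: testing

# New simple dictionary format, accepted from 6.0.0 onwards
tags:
  environment: testing
```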
+
+Deprecated Features
+-------------------
+
+- community.aws collection - due to the AWS SDKs Python support policies (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.8 by this collection is expected to be removed in a release after 2024-12-01 (https://github.com/ansible-collections/community.aws/pull/1743).
+- community.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection has been deprecated and will be removed in release 7.0.0 (https://github.com/ansible-collections/community.aws/pull/1743).
+
+Bugfixes
+--------
+
+- opensearch_info - Fix the name of the domain_name key in the example (https://github.com/ansible-collections/community.aws/pull/1811).
+- ses_identity - fix clearing notification topic (https://github.com/ansible-collections/community.aws/issues/150).
+
+New Modules
+-----------
+
+- ec2_carrier_gateway - Manage an AWS VPC Carrier gateway
+- ec2_carrier_gateway_info - Gather information about carrier gateways in AWS
+- lightsail_snapshot - Creates snapshots of AWS Lightsail instances
+- mq_broker - MQ broker management
+- mq_broker_config - Update Amazon MQ broker configuration
+- mq_broker_info - Retrieve MQ Broker details
+- mq_user - Manage users in existing Amazon MQ broker
+- mq_user_info - List users of an Amazon MQ broker
+- ssm_inventory_info - Get SSM inventory information for EC2 instance
+
+v5.5.1
+======
+
+Release Summary
+---------------
+
+This release brings several bugfixes.
+
+Bugfixes
+--------
+
+- cloudfront_distribution - no longer crashes when waiting for completion of creation (https://github.com/ansible-collections/community.aws/issues/255).
+- cloudfront_distribution - now honours the ``enabled`` setting (https://github.com/ansible-collections/community.aws/issues/1823).
+
 v5.5.0
 ======
@@ -156,7 +350,7 @@ Bugfixes
 --------

 - aws_ssm - fixes S3 bucket region detection by ensuring boto client has correct credentials and exists in correct partition (https://github.com/ansible-collections/community.aws/pull/1428).
-- ec2_snapshot_copy - including tags caused the erorr "Tag specification resource type must have a value". Fix sets the ResourceType to snapshot to resolve this issue (https://github.com/ansible-collections/community.aws/pull/1419).
+- ec2_snapshot_copy - including tags caused the error ``Tag specification resource type must have a value``. Fix sets the ResourceType to snapshot to resolve this issue (https://github.com/ansible-collections/community.aws/pull/1419).
 - ecs_ecr - fix a ``RepositoryNotFound`` exception when trying to create repositories in check mode (https://github.com/ansible-collections/community.aws/pull/1550).
 - opensearch - Fix cluster creation when using advanced security options (https://github.com/ansible-collections/community.aws/pull/1613).
@@ -321,7 +515,7 @@ Bugfixes

 - ec2_placement_group - Handle a potential race creation during the creation of a new Placement Group (https://github.com/ansible-collections/community.aws/pull/1477).
 - elb_network_lb - fixes bug where ``ip_address_type`` in return value was not updated (https://github.com/ansible-collections/community.aws/pull/1365).
-- rds_cluster - fixes bug where specifiying an rds cluster parameter group raises a `KeyError` (https://github.com/ansible-collections/community.aws/pull/1417).
+- rds_cluster - fixes bug where specifying an rds cluster parameter group raises a ``KeyError`` (https://github.com/ansible-collections/community.aws/pull/1417).
 - s3_sync - fix etag generation when running in FIPS mode (https://github.com/ansible-collections/community.aws/issues/757).

 New Modules
@@ -329,6 +523,91 @@ New Modules

 - accessanalyzer_validate_policy_info - Performs validation of IAM policies

+v4.5.1
+======
+
+Release Summary
+---------------
+
+This release contains a minor bugfix for the ``sns_topic`` module as well as corrections to the documentation for various modules. This is the last planned release of the 4.x series.
+
+
+Bugfixes
+--------
+
+- sns_topic - avoid fetching attributes from subscribers when not setting them, this can cause permissions issues (https://github.com/ansible-collections/community.aws/pull/1418).
+
+v4.5.0
+======
+
+Release Summary
+---------------
+
+This is a minor release of the ``community.aws`` collection.
+
+Minor Changes
+-------------
+
+- ecs_service - support load balancer update for existing ecs services (https://github.com/ansible-collections/community.aws/pull/1625).
+- iam_role - Drop deprecation warning, because the standard value for purge parameters is ``true`` (https://github.com/ansible-collections/community.aws/pull/1636).
+
+Bugfixes
+--------
+
+- aws_ssm - fix ``invalid literal for int`` error on some operating systems (https://github.com/ansible-collections/community.aws/issues/113).
+- ecs_service - respect ``placement_constraints`` for existing ecs services (https://github.com/ansible-collections/community.aws/pull/1601).
+- s3_lifecycle - Module no longer calls ``put_lifecycle_configuration`` if there is no change (https://github.com/ansible-collections/community.aws/issues/1624).
+- ssm_parameter - Fix a ``KeyError`` when adding a description to an existing parameter (https://github.com/ansible-collections/community.aws/issues/1471).
+
+v4.4.0
+======
+
+Release Summary
+---------------
+
+This is a minor release of the ``community.aws`` collection.
+This changelog contains all changes to the modules and plugins in this collection
+that have been made after the previous release.
+
+Minor Changes
+-------------
+
+- elasticache_parameter_group - add ``redis6.x`` group family on the module input choices (https://github.com/ansible-collections/community.aws/pull/1476).
+
+Bugfixes
+--------
+
+- aws_ssm - fixes S3 bucket region detection by ensuring boto client has correct credentials and exists in correct partition (https://github.com/ansible-collections/community.aws/pull/1428).
+- ecs_ecr - fix a ``RepositoryNotFound`` exception when trying to create repositories in check mode (https://github.com/ansible-collections/community.aws/pull/1550).
+- opensearch - Fix cluster creation when using advanced security options (https://github.com/ansible-collections/community.aws/pull/1613).
+
+v4.3.0
+======
+
+Release Summary
+---------------
+
+The community.aws 4.3.0 release includes a number of minor bug fixes and improvements.
+Following the release of amazon.aws 5.0.0, backports to the 4.x series will be limited to security issues and bugfixes.
+
+Minor Changes
+-------------
+
+- autoscaling_group_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudfront_distribution - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudfront_origin_access_identity - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudtrail - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- ec2_vpc_nacl - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- eks_fargate_profile - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- redshift - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- s3_bucket_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+
+Bugfixes
+--------
+
+- ec2_placement_group - Handle a potential race creation during the creation of a new Placement Group (https://github.com/ansible-collections/community.aws/pull/1477).
+- rds_cluster - fixes bug where specifying an rds cluster parameter group raises a ``KeyError`` (https://github.com/ansible-collections/community.aws/pull/1417).
+
 v4.2.0
 ======
@@ -385,7 +664,7 @@ Deprecated Features

 - community.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection has been deprecated and will be removed in a release after 2023-05-31 (https://github.com/ansible-collections/community.aws/pull/1361).
 - iam_policy - the ``policies`` return value has been renamed ``policy_names`` and will be removed in a release after 2024-08-01, both values are currently returned (https://github.com/ansible-collections/community.aws/pull/1375).
 - lambda_info - The ``function`` return key returns a dictionary of dictionaries and has been deprecated. In a release after 2025-01-01, this key will be removed in favor of ``functions``, which returns a list of dictionaries (https://github.com/ansible-collections/community.aws/pull/1239).
-- route53_info - The CamelCase return values for ``DelegationSets``, ``CheckerIpRanges``, and ``HealthCheck`` have been deprecated, in the future release you must use snake_case return values ``delegation_sets``, ``checker_ip_ranges``, and ``health_check`` instead respectively" (https://github.com/ansible-collections/community.aws/pull/1322).
+- route53_info - The CamelCase return values for ``DelegationSets``, ``CheckerIpRanges``, and ``HealthCheck`` have been deprecated, in the future release you must use snake_case return values ``delegation_sets``, ``checker_ip_ranges``, and ``health_check`` instead respectively (https://github.com/ansible-collections/community.aws/pull/1322).

 Bugfixes
 --------
@@ -548,7 +827,7 @@ Removed Features (previously deprecated)
 ----------------------------------------

 - aws_kms_info - the unused and deprecated ``keys_attr`` parameter has been removed (https://github.com/ansible-collections/amazon.aws/pull/1172).
-- data_pipeline - the ``version`` option has always been ignored and has been removed (https://github.com/ansible-collections/community.aws/pull/1160"
+- data_pipeline - the ``version`` option has always been ignored and has been removed (https://github.com/ansible-collections/community.aws/pull/1160).
 - ec2_eip - The ``wait_timeout`` option has been removed. It has always been ignored by the module (https://github.com/ansible-collections/community.aws/pull/1159).
 - ec2_lc - the ``associate_public_ip_address`` option has been removed. It has always been ignored by the module (https://github.com/ansible-collections/community.aws/pull/1158).
 - ec2_metric_alarm - support for using the ``<=``, ``<``, ``>`` and ``>=`` operators for comparison has been dropped. Please use ``LessThanOrEqualToThreshold``, ``LessThanThreshold``, ``GreaterThanThreshold`` or ``GreaterThanOrEqualToThreshold`` instead (https://github.com/ansible-collections/amazon.aws/pull/1164).
@@ -602,6 +881,33 @@ New Modules

 - opensearch_info - obtain information about one or more OpenSearch or ElasticSearch domain
 - rds_cluster_snapshot - Manage Amazon RDS snapshots of DB clusters

+v3.6.0
+======
+
+Release Summary
+---------------
+
+Following the release of community.aws 5.0.0, 3.6.0 is a bugfix release and the final planned release for the 3.x series.
+
+
+Minor Changes
+-------------
+
+- autoscaling_group_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudfront_distribution - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudfront_origin_access_identity - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudtrail - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- ec2_asg_lifecycle_hook - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- ec2_vpc_nacl - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- redshift - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- s3_bucket_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+
+Bugfixes
+--------
+
+- ec2_placement_group - Handle a potential race creation during the creation of a new Placement Group (https://github.com/ansible-collections/community.aws/pull/1477).
+- s3_lifecycle - fix bug when deleting rules with an empty prefix (https://github.com/ansible-collections/community.aws/pull/1398).
+
 v3.5.0
 ======
@@ -649,7 +955,7 @@ Deprecated Features
 -------------------

 - aws_codebuild - The ``tags`` parameter currently uses a non-standard format and has been deprecated. In release 6.0.0 this parameter will accept a simple key/value pair dictionary instead of the current list of dictionaries. It is recommended to migrate to using the resource_tags parameter which already accepts the simple dictionary format (https://github.com/ansible-collections/community.aws/pull/1221).
-- route53_info - The CamelCase return values for ``HostedZones``, ``ResourceRecordSets``, and ``HealthChecks`` have been deprecated, in the future release you must use snake_case return values ``hosted_zones``, ``resource_record_sets``, and ``health_checks`` instead respectively".
+- route53_info - The CamelCase return values for ``HostedZones``, ``ResourceRecordSets``, and ``HealthChecks`` have been deprecated, in the future release you must use snake_case return values ``hosted_zones``, ``resource_record_sets``, and ``health_checks`` instead respectively.

 Bugfixes
 --------
@@ -914,6 +1220,65 @@ Bugfixes

 - aws_eks - Fix EKS cluster creation with short names (https://github.com/ansible-collections/community.aws/pull/818).

+v2.6.1
+======
+
+Release Summary
+---------------
+
+Bump collection from 2.6.0 to 2.6.1 due to a publishing error with 2.6.0. This release supersedes 2.6.0 entirely; users should skip 2.6.0.
+
+v2.6.0
+======
+
+Release Summary
+---------------
+
+This is the last planned 2.x release of the ``community.aws`` collection.
+Consider upgrading to the latest version of ``community.aws`` soon.
+
+Minor Changes
+-------------
+
+- ecs_service - ``deployment_circuit_breaker`` has been added as a supported feature (https://github.com/ansible-collections/community.aws/pull/1215).
+- ecs_service - add a ``service`` alias so the ECS service name can be addressed with the same parameter the ecs_service_info module uses (https://github.com/ansible-collections/community.aws/pull/1187).
+- ecs_service_info - add a ``name`` alias so the ECS service name can be addressed with the same parameter the ecs_service module uses (https://github.com/ansible-collections/community.aws/pull/1187).
+
+Bugfixes
+--------
+
+- ecs_service - fix broken change detection of the ``health_check_grace_period_seconds`` parameter when it is not specified (https://github.com/ansible-collections/community.aws/pull/1212).
+- ecs_service - use the default cluster name ``default`` when none is given (https://github.com/ansible-collections/community.aws/pull/1212).
+- ecs_task - don't require ``cluster`` and use the name ``default`` when none is given (https://github.com/ansible-collections/community.aws/pull/1212).
+- wafv2_ip_set - fix bug where an incorrect changed state was returned when only changing the description (https://github.com/ansible-collections/community.aws/pull/1211).
+
+v2.5.0
+======
+
+Release Summary
+---------------
+
+This is a minor release of the ``community.aws`` collection.
+
+Minor Changes
+-------------
+
+- iam_policy - update broken examples and add RETURN section to documentation; add extra integration tests for idempotency check mode runs (https://github.com/ansible-collections/community.aws/pull/1093).
+- iam_role - delete inline policies prior to deleting role (https://github.com/ansible-collections/community.aws/pull/1054).
+- iam_role - remove global vars and refactor accordingly (https://github.com/ansible-collections/community.aws/pull/1054).
+
+Bugfixes
+--------
+
+- ecs_service - add missing change detection of the ``health_check_grace_period_seconds`` parameter (https://github.com/ansible-collections/community.aws/pull/1145).
+- ecs_service - fix broken comparison of ``task_definition`` that always resulted in a changed task (https://github.com/ansible-collections/community.aws/pull/1145).
+- ecs_service - fix validation for ``placement_constraints``; it's now possible to use the ``distinctInstance`` placement constraint (https://github.com/ansible-collections/community.aws/issues/1058).
+- ecs_taskdefinition - fix broken change detection of the ``launch_type`` parameter (https://github.com/ansible-collections/community.aws/pull/1145).
+- execute_lambda - fix check mode and update RETURN documentation (https://github.com/ansible-collections/community.aws/pull/1115).
+- iam_policy - require one of ``policy_document`` and ``policy_json`` when state is present to prevent a MalformedPolicyDocumentException from being thrown (https://github.com/ansible-collections/community.aws/pull/1093).
+- s3_lifecycle - add support for the value *0* for ``transition_days`` (https://github.com/ansible-collections/community.aws/pull/1077).
+- s3_lifecycle - check that the configuration is complete before returning (https://github.com/ansible-collections/community.aws/pull/1085).
+
v2.4.0
======

@@ -1534,7 +1899,7 @@ Bugfixes

- cloudfront_distribution - Always add field_level_encryption_id to cache behaviour to match AWS requirements
- cloudwatchlogs_log_group - Fix a KeyError when updating a log group that does not have a retention period (https://github.com/ansible/ansible/issues/47945)
- cloudwatchlogs_log_group_info - remove limitation of max 50 results
-- ec2_asg - Ensure "wait" is honored during replace operations
+- ec2_asg - Ensure ``wait`` is honored during replace operations
- ec2_launch_template - Update output to include latest_version and default_version, matching the documentation
- ec2_transit_gateway - Use AWSRetry before ClientError is handled when describing transit gateways
- ec2_transit_gateway - fixed issue where auto_attach set to yes was not being honored (https://github.com/ansible/ansible/issues/61907)
diff --git a/ansible_collections/community/aws/CONTRIBUTING.md b/ansible_collections/community/aws/CONTRIBUTING.md
index db74cf119..eb41c9274 100644
--- a/ansible_collections/community/aws/CONTRIBUTING.md
+++ b/ansible_collections/community/aws/CONTRIBUTING.md
@@ -1,15 +1,5 @@
# Contributing

-## Getting Started
-
-General information about setting up your Python environment, testing modules,
-Ansible coding styles, and more can be found in the [Ansible Community Guide](
-https://docs.ansible.com/ansible/latest/community/index.html).
-
-Information about boto library usage, module utils, testing, and more can be
-found in the [AWS Guidelines](https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html)
-documentation.
-
## AWS Collections

There are two related collections containing AWS content (modules and plugins).

@@ -18,7 +8,7 @@ There are two related collections containing AWS content (modules and plugins).

This collection contains the `module_utils` (shared libraries) used by both collections.
Content in this collection is included downstream in Red Hat Ansible Automation Platform.
-Code standards, test coverage, and other supportability criteria may be higher in this collection.
+Code standards, test coverage, and other supportability criteria may be higher in this collection.

### community.aws
This collection contains modules and plugins contributed and maintained by the Ansible AWS
@@ -30,19 +20,60 @@
Content in this collection that is stable and meets other acceptance criteria has the potential
to be promoted and migrated into `amazon.aws`.

## Submitting Issues
-All software has bugs, and the `community.aws` collection is no exception. When you find a bug,
+All software has bugs, and the `community.aws` collection is no exception. When you find a bug,
you can help tremendously by [telling us about it](https://github.com/ansible-collections/community.aws/issues/new/choose).

-If you should discover that the bug you're trying to file already exists in an issue,
-you can help by verifying the behavior of the reported bug with a comment in that
+If you should discover that the bug you're trying to file already exists in an issue,
+you can help by verifying the behavior of the reported bug with a comment in that
issue, or by reporting any additional information.

-## Pull Requests
-
-All modules MUST have integration tests for new features. Upgrading to boto3 shall be considered a feature request.
-Bug fixes for modules that currently have integration tests SHOULD have tests added.
-New modules should be submitted to the [community.aws](https://github.com/ansible-collections/community.aws) collection
-and MUST have integration tests.
+## Writing New Code
+
+New modules should be submitted to the [community.aws](https://github.com/ansible-collections/community.aws) collection.
+
+For new features and bug fixes on existing modules,
+clone this repository and try to run the unit tests and integration tests by following
+[these instructions](https://docs.ansible.com/ansible/latest/community/create_pr_quick_start.html).
+When you get to this part:
+
+```
+ansible-test integration name_of_test_subdirectory --docker -v
+```
+
+Run this from the `tests` directory of this repository.
+Replace `name_of_test_subdirectory` with the name of the relevant directory within `tests/integration/targets`.
+You'll get this error:
+
+```
+WARNING: Excluding tests marked "cloud/aws" which require config
+(see "/home/dev/ansible/ansible/test/lib/ansible_test/config/cloud-config-aws.ini.template"): ec2_group
+```
+This is because, unlike plain `boto3` and the `aws` CLI, the integration tests don't
+automatically detect the AWS credentials on your machine.
+(Typically because they're run inside Docker, which can't access `~/.aws/credentials`.
+But even when running tests outside Docker, the tests ignore `~/.aws/credentials`.)
+You need to explicitly create credentials and load them into an Ansible-specific file.
+To do this, copy the file mentioned in that error message
+into the clone of this repo, as `tests/integration/cloud-config-aws.ini`.
+Modify the `@` variables, pasting in your IAM credentials.
+If you don't need the `security_token` (most IAM users don't), comment that line out.
+
+You can use an AWS account that already has unrelated resources in it.
+The tests should not touch pre-existing resources, and should tidy up after themselves.
+(Of course, for security reasons, you may want to run in a dedicated AWS account.)
+
+If you're only writing a pull request for one AWS service,
+you can create credentials with only the permissions required for that test.
+For example, to test the Lambda modules, you only need Lambda permissions,
+and permissions to create IAM roles.
+You could also deploy [the policies used by the CI](https://github.com/mattclay/aws-terminator/tree/master/aws/policy).
+
+All modules MUST have integration tests for new features.
+Bug fixes for modules that currently have integration tests SHOULD have tests added.
+
+Once you're able to run the integration tests for the existing code,
+start by adding tests in `tests/integration/targets`
+for your new feature or for the bug(s) you're about to fix.

Expected test criteria:
* Resource creation under check mode

@@ -60,22 +91,48 @@

Where modules have multiple parameters we recommend running through the 4-step modification cycle
for each parameter the module accepts, as well as a modification cycle where most,
if not all, parameters are modified at the same time.

-For general information on running the integration tests see the
-[Integration Tests page of the Module Development Guide](https://docs.ansible.com/ansible/devel/dev_guide/testing_integration.html#testing-integration),
especially the section on configuration for cloud tests.
For questions about writing tests the Ansible AWS community can be found on [libera.chat](https://libera.chat/) IRC as detailed below.

+After writing the tests, write or modify the module code, typically in `plugins/modules`.
+Don't forget to add [a changelog entry](https://docs.ansible.com/ansible/latest/community/collection_development_process.html#collection-changelog-fragments).
+Then create a pull request.
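+
+As a rough sketch of the test cycle described above (not taken from any existing
+target; the `community.aws.example_thing` module, its options, and the assertions
+are purely hypothetical), a target's `tasks/main.yml` might look like:
+
+```
+# tests/integration/targets/example_thing/tasks/main.yml (hypothetical target)
+- name: Create the resource (check mode)
+  community.aws.example_thing:
+    name: "{{ resource_prefix }}-example"  # resource_prefix is provided by ansible-test
+    state: present
+  check_mode: true
+  register: create_check
+
+- name: Assert that check mode reported a change without creating anything
+  ansible.builtin.assert:
+    that:
+      - create_check is changed
+
+- name: Create the resource
+  community.aws.example_thing:
+    name: "{{ resource_prefix }}-example"
+    state: present
+  register: created
+
+- name: Create the resource again (idempotency)
+  community.aws.example_thing:
+    name: "{{ resource_prefix }}-example"
+    state: present
+  register: created_again
+
+- name: Assert that the first run changed and the second did not
+  ansible.builtin.assert:
+    that:
+      - created is changed
+      - created_again is not changed
+```
+
+Changelog fragments are small YAML files placed under `changelogs/fragments/`.
+A typical bugfix fragment looks like this (the module name and PR number below
+are placeholders):
+
+```
+# changelogs/fragments/0000-example_thing-fix-something.yml (hypothetical name)
+bugfixes:
+  - example_thing - fix the bug you just fixed (https://github.com/ansible-collections/community.aws/pull/0000).
+```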
+
+If you're struggling with running integration tests locally, don't worry.
+After you create a pull request, the CI bot will automatically run the tests for you.
+
+## More information about contributing
+
+General information about setting up your Python environment, testing modules,
+Ansible coding styles, and more can be found in the [Ansible Community Guide](
+https://docs.ansible.com/ansible/latest/community/index.html).
+
+Information about AWS SDK library usage, module utils, testing, and more can be
+found in the [AWS Guidelines](https://docs.ansible.com/ansible/devel/collections/amazon/aws/docsite/dev_guidelines.html#ansible-collections-amazon-aws-docsite-dev-guide-intro)
+documentation.
+
+For general information on running the integration tests see
+[this page](https://docs.ansible.com/ansible/latest/community/collection_contributors/test_index.html) and the
+[Integration Tests page of the Module Development Guide](https://docs.ansible.com/ansible/devel/dev_guide/testing_integration.html#non-destructive-tests).
+Ignore the part about `source hacking/env-setup`; that's only applicable when working on `ansible-core`.
+You should be able to use the `ansible-test` that's installed with Ansible itself.
+Look at [the section on configuration for cloud tests](https://docs.ansible.com/ansible/devel/dev_guide/testing_integration.html#other-configuration-for-cloud-tests).
+For questions about writing tests, the Ansible AWS community can
+be found on Libera.Chat IRC as detailed below.

-### Changelog Fragment
-Once a PR has been created, make sure to create a changelog [fragment](https://github.com/ansible-collections/community.aws/tree/main/changelogs/fragments).

+- [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html) - Details on contributing to Ansible
+- [Contributing to Collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections) - How to check out collection git repositories correctly
+- [Contributing to Ansible-maintained collections](https://docs.ansible.com/ansible/devel/community/contributing_maintained_collections.html#contributing-maintained-collections)
+- [Guidelines for Ansible Amazon AWS module development](https://docs.ansible.com/ansible/latest/dev_guide/platforms/aws_guidelines.html)
+- [Getting Started With AWS Ansible Module Development and Community Contribution](https://www.ansible.com/blog/getting-started-with-aws-ansible-module-development)

-For more information on changelog fragments then refer to the guide found [here](https://docs.ansible.com/ansible/latest/community/development_process.html#changelogs).

### Code of Conduct
-The `community.aws` collection follows the Ansible project's
-[Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html).
+The `community.aws` collection follows the Ansible project's
+[Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html).
Please read and familiarize yourself with this document.

### IRC
-Our IRC channels may require you to register your nickname. If you receive an error when you connect, see [Libera.Chat's Nickname Registration guide](https://libera.chat/guides/registration) for instructions.
+Our IRC channels may require you to register your nickname. If you receive an error when you connect, see
+[Libera.Chat's Nickname Registration guide](https://libera.chat/guides/registration) for instructions.
-The `#ansible-aws` channel on [irc.libera.chat](https://libera.chat/) is the main and official place to discuss use and development of the `community.aws` collection. +The `#ansible-aws` channel on [irc.libera.chat](https://libera.chat/) is the main and official place to discuss use and development +of the `community.aws` collection. diff --git a/ansible_collections/community/aws/FILES.json b/ansible_collections/community/aws/FILES.json index 7b24063c0..6bc607963 100644 --- a/ansible_collections/community/aws/FILES.json +++ b/ansible_collections/community/aws/FILES.json @@ -64,6 +64,20 @@ "format": 1 }, { + "name": ".github/workflows/all_green_ckeck.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dd88632dd1829b9ffa497fdc464a6ae0bad9a11dc474ec0da0f97b8598bd82a5", + "format": 1 + }, + { + "name": ".github/workflows/changelog_and_linters.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "466994abfa3234b3feb307742125dbe26f929c9762461706c907e4cdc8c0a424", + "format": 1 + }, + { "name": ".github/workflows/docs-pr.yml", "ftype": "file", "chksum_type": "sha256", @@ -78,6 +92,48 @@ "format": 1 }, { + "name": ".github/workflows/galaxy-importer.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "412a395e15c087fa3524eb1cbb4d94f8836f0fe174c2f93eb3883978355d3aa3", + "format": 1 + }, + { + "name": ".github/workflows/release-manual.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0cdc8148954ef8340ea4aa9e56b6df226731e033c5b1a40b052a3e3b15e87bb6", + "format": 1 + }, + { + "name": ".github/workflows/release-tag.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac9fb7d547e119a1cb2981bf497b72dd26faa4c78ddd1c124eccb7ed0d429f1e", + "format": 1 + }, + { + "name": ".github/workflows/sanity.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "371f8aed1f995954dce749b4017b61c7aabac004e0d3123db97e691b48bca918", + "format": 1 + }, + { + "name": ".github/workflows/units.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d2264d8d520315e56ca91fc4c1d6e996c18a7c0115dd47714571528c6f259a1", + "format": 1 + }, + { + "name": ".github/workflows/update-variables.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "757a9b2c25111bb216f6a9c2c9fb2e846c792cdf7eecc1182b2cf1f659e7ef72", + "format": 1 + }, + { "name": ".github/BOTMETA.yml", "ftype": "file", "chksum_type": "sha256", @@ -123,14 +179,14 @@ "name": "changelogs/changelog.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "437dc32422a511e613c225a5904460c7a63a1a9364e9d4914746981ba3ac23d8", + "chksum_sha256": "71507aa348f024b04b28a249ccb60a22d09d95eaf0e2f503add687640fc648e9", "format": 1 }, { "name": "changelogs/config.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a5108e9a705d8037b5e214c95ff2bba76e09c8ff4c391c144f1f6f7a5edb051f", + "chksum_sha256": "4bb19a6205bc4e18b0d9adb6dfeeab3005860541d6fd509400fa4439b98749ba", "format": 1 }, { @@ -158,7 +214,7 @@ "name": "docs/docsite/rst/CHANGELOG.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "64943fcf1cb90ec0ae493011dcbef073aec7f5c235aaf36e53890a60cbd15826", + "chksum_sha256": "5c20e11dfc1704180b5d197a68107a5a6092c324a99739646c42bb0e1a0dc8a4", "format": 1 }, { @@ -172,7 +228,7 @@ "name": "docs/docsite/links.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "815bac1d202f434e272fa930267244b093866a43ca97140f4e50dffe59c37683", + "chksum_sha256": 
"d0d10fb4e0294eb17b32c582b70a50aa39d01c0a272c01818f7044ce92b77196", "format": 1 }, { @@ -186,7 +242,7 @@ "name": "meta/runtime.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2010a0b72247af5bb2c1e87452ec7a8445b59cbcd08b7d210c9c2b42d06e9341", + "chksum_sha256": "6195942600514b3a6fd22586eaba89cccdc9bc09265aff576f7a2de8346c4a6c", "format": 1 }, { @@ -214,7 +270,21 @@ "name": "plugins/connection/aws_ssm.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "62d7ec08ed815f9ddcaddedb411083a5feea25d76abc81f53b6520f97ea5345a", + "chksum_sha256": "72738ace4fc3fabddcee868e0ad6f01ae82976b1aed13319bdbe5ddef1c8e6c6", + "format": 1 + }, + { + "name": "plugins/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/inventory/aws_mq.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d866336a72b5d493127038989c37f8cf8786e894499040ff956188c051e5e7a3", "format": 1 }, { @@ -228,56 +298,77 @@ "name": "plugins/module_utils/base.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3773f38412eed027d84ddb7ad7517a097cc9d47068e0b693093e8b77dc906242", + "chksum_sha256": "6582878e32780af41739e599bda223caa0778ec24ac9b1378eb307e6826bb50b", + "format": 1 + }, + { + "name": "plugins/module_utils/common.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ae9caf904353f2952d5257bea7cb0b4a4e96668dcc9871c82809cd0b4f38f4f", + "format": 1 + }, + { + "name": "plugins/module_utils/dynamodb.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e51e3842505c9a4d57000e629ab98184f9e581f3bde4e584dcb459397700f51e", "format": 1 }, { "name": "plugins/module_utils/ec2.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d19154dd983ca303c7e1967cff80581c6bbea9aa7eda9ba9b9debfa1c1a6d3c5", + "chksum_sha256": "439eb9e1f59f2ca0cbd5cd5455dc321844d5db5118d6e0949971f56d573fe50c", "format": 1 }, { "name": "plugins/module_utils/etag.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1236f420413376bf76dce9ac19e113d270b4d70f80fc42c0902d9e7bd209b483", + "chksum_sha256": "2b7faaafc5ea73e79443a8e21eb7a73478a81ad84238bba3068b9c298d548e5d", + "format": 1 + }, + { + "name": "plugins/module_utils/modules.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a9add15b7a67440c2984c97698e58cf4f4485f67b905cb709265493280b69c5", "format": 1 }, { "name": "plugins/module_utils/networkfirewall.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "766a3b691221bf7da2a98c7690b9e9945285ba88b62ad88bc689311b46abae26", + "chksum_sha256": "96f40c4441345006478c54ec163cd0e6cc681b8b7e871bab5ec06f713a719c53", "format": 1 }, { "name": "plugins/module_utils/opensearch.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a2a9c1143a5406c9b0ce4cde80b58778c0136e4e6e5fa8805f043012ff50e008", + "chksum_sha256": "b1301aece4e36452f82dd529b966e237ba0e5c3c1739d4eb2e7ac3139a9f86cc", "format": 1 }, { "name": "plugins/module_utils/sns.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7bf414cf158ebd415be2b7b3360065a7ac65a9e21c99396397ed914e1af1b605", + "chksum_sha256": "23d475188d460dd91197220c9b601c378701c70ef31d075ca8177fedd6969b7d", "format": 1 }, { "name": "plugins/module_utils/transitgateway.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8b6682e83c3088dc467e953077eb8ad1b850f8ecc851c5d7c7dea030be0f6b70", + "chksum_sha256": "5518aa6dc8b009cf0413a4d58cad0f65d2860730d9b51b60960d9c0c52f5af1c", "format": 1 }, { "name": 
"plugins/module_utils/wafv2.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2b16084971bfb7e4c98b7d37d183d30675e8c07ab04af7523436fd3eb4a8dc02", + "chksum_sha256": "2b071356932d3f50ba9a2ebb68713917cb0c29c51d67c8f124770ccd244de131", "format": 1 }, { @@ -298,1113 +389,1113 @@ "name": "plugins/modules/accessanalyzer_validate_policy_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3b03a1e2a7f09d6b2b9b2ec5d742deb2ead05c703f5078c3feac604911af3e81", + "chksum_sha256": "91a5b80934e5876b2212b7e78a1e4cdb7a143be3e20d955d70c4181407825d73", "format": 1 }, { "name": "plugins/modules/acm_certificate.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4abea07d633c3f77691a1e51294c3d2c8f0e5a71c17a2fb7565c6e93761a0c18", + "chksum_sha256": "6b3a346a4f8afc01050937da428d677ffce23a70e13718bcc8619a343e5308d0", "format": 1 }, { "name": "plugins/modules/acm_certificate_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2480e877fae99a6258dd1364bbdc0536a23b99fc229c24a70213f09f9938fcb6", + "chksum_sha256": "932191c0d60b934ff3f41a41a158232f3471122d704c5d06ec13c101cbe12aa1", "format": 1 }, { "name": "plugins/modules/api_gateway.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e9d46aa61a7b727811bc4b82ff5f93bd65d66898e7f964a35c1eb52d1889d85e", + "chksum_sha256": "7f2f87711ce80f6e2e684cd102e8c6dc41940bbebe1293846b9c23b76c18cf9e", "format": 1 }, { "name": "plugins/modules/api_gateway_domain.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7a532675a592813db5220342cd0f07669e0e0c4cd206a71e1bc7f1afe54d0d89", + "chksum_sha256": "a134bf3499698afa8bd0be89b20461215fbf6edf601c943b7a0bdcbe2f8d689d", + "format": 1 + }, + { + "name": "plugins/modules/api_gateway_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "41c938185719ace530e2762dad486d389c1689d03d288c340e38d40773e9e9f9", "format": 1 }, { "name": "plugins/modules/application_autoscaling_policy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3cca972ea4fe88a59ebee02e13ca47ae3c8ac16262388e17905caf8cf5d44c9a", + "chksum_sha256": "d8c1255b76cb821f38ea5f82ddcb05e495b910b85bbf506164b257d1f99dda73", "format": 1 }, { "name": "plugins/modules/autoscaling_complete_lifecycle_action.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "57e9d124e3df8a236889b67e6672f15b13bec2d78418fddc7e0e49b0646249a1", + "chksum_sha256": "52f7cf6588c9125e2442b0e8468aa0ba087861cb258698c0a82df4d2c5d8eea9", "format": 1 }, { "name": "plugins/modules/autoscaling_instance_refresh.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ddd64bd63aec61c9d9eb89318764ba1d5d85efc0cd715404f1d5c441842cc15c", + "chksum_sha256": "6ae892c2473bf0816f187a0b014cebf3fcc253d3f241825adc754d91f01f6079", "format": 1 }, { "name": "plugins/modules/autoscaling_instance_refresh_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4017c039bf9dbae7c2e0b6e2025ab0113ca0f4ccd1953f6ef7dae6d8c065be1d", + "chksum_sha256": "c372ba8ddec9bc7f0a4ab66630137661b6027d2502f85d21330675e2849f5895", "format": 1 }, { "name": "plugins/modules/autoscaling_launch_config.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d0873afdd99f6c65152b20f881994cc427e76df8d9c7b016e1844db5b10fdc19", + "chksum_sha256": "30a3e098336211e85205b737758c55c7b5fb6913d9f1e5edcb24d899199a6239", "format": 1 }, { "name": "plugins/modules/autoscaling_launch_config_find.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"12084b66d0dbd1d2164ca8bcfad72bba3ee8be63c8a1dd9fc7d503bf1a4c7f98", + "chksum_sha256": "fb6e4f2d39dbf4f66307fbac1e09bb469aa111d8b39227d5f409c8dbfb096afe", "format": 1 }, { "name": "plugins/modules/autoscaling_launch_config_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e198b4d78942841bdf4fd8153f062e8e524863e43c85d691bff9bb5aa104bc36", + "chksum_sha256": "03889bbee2c40eef5a79b958c1adc6acf1dd1385e63bd2cec4a3c73e701d4a18", "format": 1 }, { "name": "plugins/modules/autoscaling_lifecycle_hook.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "790375cbd689914a75ee46fda66797a4ed8bbf4b749f732a196ab0d85764afff", + "chksum_sha256": "7d277af4f7909bf2e62d03c99fd074ee9410b23c5dc830de1fab6560a5722770", "format": 1 }, { "name": "plugins/modules/autoscaling_policy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "70a22498e64860a1051d55f9b973def78cc8db2ff0d681e4aea570afa93058ff", + "chksum_sha256": "30154ce721a8670bab2915d80700cd6dfc73c6713a036be07b22edaabef8512e", "format": 1 }, { "name": "plugins/modules/autoscaling_scheduled_action.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dc0cd4113cb779bb53e7c6a91e59bdd8ca5c2bda38bf87db416f59f3624c0149", - "format": 1 - }, - { - "name": "plugins/modules/aws_region_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "054292965f42085f31bb80aecb9d907a6a003a2acb0cd1c136e4fc717693ab8a", + "chksum_sha256": "82533ffc50b4a218ffbba3e1a611f0f1a0eb928171910ad8eafe9c87e7292569", "format": 1 }, { "name": "plugins/modules/batch_compute_environment.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "aacf6a0f458ed02be8d8e30ee9522a0cf0f7bbc65c96fdf747bc2e229d021d2a", + "chksum_sha256": "3cd1c9117814c7437f12689097f3c6667d58aedb65f1086fce93ba62917eebcb", "format": 1 }, { "name": "plugins/modules/batch_job_definition.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d6ceea456d86137242b06f099ab08ef12a3ef83ec12acd3b1201b1ea2fb4f4c1", + "chksum_sha256": "42c9e000ee95ccc76a9af0575d05b4fafc2c8e7c36dc43df0e60ab1b0cd972ba", "format": 1 }, { "name": "plugins/modules/batch_job_queue.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5db3c68224a145d267a4ac879588f5cccfaba09db54b06631311fc624bccda80", + "chksum_sha256": "9d2915da3b913821b7f7bf466905aa7696ded2b4fac40d8e8e83e75250ce8385", "format": 1 }, { "name": "plugins/modules/cloudformation_exports_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c8c0fadffffaa45755a0f37e76921b0fafc8a442f85cf9be1955cad6a286b5f9", + "chksum_sha256": "60335fa25825552da02f00c3f3ee40d2aae54d692ba92ed52a548f63c62de4b0", "format": 1 }, { "name": "plugins/modules/cloudformation_stack_set.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "93b5ec32d38b341b1a1655a8b71c9bfc16997a6a75d5bc45df1bac6ecbd34a91", + "chksum_sha256": "a70f91c9c1dbf233555e8168028d19e6b83184dc026eb9b9ffdb6d845034fb9a", "format": 1 }, { "name": "plugins/modules/cloudfront_distribution.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "aa05169646699efe9ecab565cdfd70f9a8b4ecf85a432ee1838a0f14bf87b119", + "chksum_sha256": "bec37c5ef58dc850cf02dd989522f212c4245e6776197c220fe370133958dd86", "format": 1 }, { "name": "plugins/modules/cloudfront_distribution_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6f280c944ee9268997032ad42f00dc51a5d308f9d033172729a3f01ec008b6f8", + "chksum_sha256": "0ac23aa16fd2764eba4f535f19761bcbd7540901bd0eded24fd6cfecc49afec1", "format": 1 }, { "name": 
"plugins/modules/cloudfront_invalidation.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2cc2be4131d7193f6a03b3abc85da0b000a3615a063209aacd3f120baefe6512", + "chksum_sha256": "a2472efb0be3c742a9f24082d00a1d424f1404a3cacaad65100f6254ba5d8205", "format": 1 }, { "name": "plugins/modules/cloudfront_origin_access_identity.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e0574c3fccdb4351749171424c2cf06916c38e61058e5c375867a84eeb1a9c98", + "chksum_sha256": "3fba4261fa7195983427e52ff1c9d4a0a42487ef86d9cfcb2764999acccbe85d", "format": 1 }, { "name": "plugins/modules/cloudfront_response_headers_policy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "aa2783c329492ba3c56005b6addd21761e8fb3c84031b1ad96c923e875067e56", + "chksum_sha256": "17c735bd277e79e2cab57ab8708d77e56fa8798900df8709b640682701f1ac4b", "format": 1 }, { "name": "plugins/modules/codebuild_project.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "65d1a0b0a08cb4a2abc8974e58709bf8e06e3cdd4eb76ff12e43833bf1c04d7a", + "chksum_sha256": "fec10a83e153383a3950dade221fcd8ab9a20fa0de92db78d95a4f20df320f94", "format": 1 }, { "name": "plugins/modules/codecommit_repository.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "68fda5f55704559ecdbc477f338c16a9b3c2645a1c91b38668d30de1576fd0d2", + "chksum_sha256": "21f047bcf54bdb1f4676e38a711cf8233ade685b836e2ee73ee6c4557bcb9af7", "format": 1 }, { "name": "plugins/modules/codepipeline.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ddbc5948f572462620c906acd56a8adc9c3aa5acbfc82f14471009471ef507fa", + "chksum_sha256": "cbffb81670cd1fdf321e4574d26b5c933a1860a003fe5ae095ff63c9196f1379", "format": 1 }, { "name": "plugins/modules/config_aggregation_authorization.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1c9b127ebdcd0114fc31c0b91824361454b1bd460dc84cf436c994085aac9327", + "chksum_sha256": "a6b54e57b64e0d06bd56ccc2a01e4d362b235220ba129f8c1759f5e3ab24042b", "format": 1 }, { "name": "plugins/modules/config_aggregator.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a895c00d5b4aff172cf884facd1c0da19346e08ce322b1b4ba20609cf0bb11ab", + "chksum_sha256": "3ddb3ddcdb50e164fbd9348739b0a4fc8b550f95f0986a00206473db90b54961", "format": 1 }, { "name": "plugins/modules/config_delivery_channel.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "af19b1718ef99e1ddffa558a247a9ba272202b20b9511bce337aac5a4bf68a06", + "chksum_sha256": "db7aa8c1ba89189250af04f1e3cd7149096b93d50cfceb2bd3e0e437d545c089", "format": 1 }, { "name": "plugins/modules/config_recorder.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5154567bf7d6ed270d2337eb57d8e5bc64a7626206f1a1632ab4ec85a40f5b43", + "chksum_sha256": "6e2810e8390a53d33b0c5c20bb16695b806fc6ecdf736ecfd2ac6f0a6dbe38fc", "format": 1 }, { "name": "plugins/modules/config_rule.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8b80425a2f13423ce3f4aeaf949c41b880962f2f22110e7ad5236df1b6b1c9f2", + "chksum_sha256": "f91e2d9538bc85df593f2ce1b96d3d668c1b80611e3bf8a293dd5af5c405c9c8", "format": 1 }, { "name": "plugins/modules/data_pipeline.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ea36eb5952ee7bd9b31ada4d31b30f365f02fb542166836d2af87e118c3ab104", + "chksum_sha256": "55b442ad3390de1f976c8004d575291413169b5b150cd106df7c0a2ec9777a92", "format": 1 }, { "name": "plugins/modules/directconnect_confirm_connection.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"e98ac8cc28ea344fb517aee21adf7df4f11e84cd1685bc5e4258432d2597e1c5", + "chksum_sha256": "37672d0dfbdb9e261b9104ae072965bae969e082438c1ebd08e729c70e3267cc", "format": 1 }, { "name": "plugins/modules/directconnect_connection.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "98d48050da33e291adbc54f00481a7b5342941e4a2fd4a7acf2d9060cfb5f336", + "chksum_sha256": "6b22576c6493e627710238dac7ef944edd1ac8668360d134e7a01e00c033a8bc", "format": 1 }, { "name": "plugins/modules/directconnect_gateway.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "608d7e967844f304fba0e8e516f4330b7b532b99d81286381d4cce32c870ed5a", + "chksum_sha256": "b9228e0694120ee11917e2eaba46653a3b9efee19616864385326c3f3ca8b2d2", "format": 1 }, { "name": "plugins/modules/directconnect_link_aggregation_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ef1c4fd126291fdf4dd9da4b7efda630afab53ee10199e6de321b51d019899e6", + "chksum_sha256": "133b009324224cac488252e92f13b687f7faa8d4234f1693194a9e3ee40d029c", "format": 1 }, { "name": "plugins/modules/directconnect_virtual_interface.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "334afba11f87ad5040b5dc355f3b1655d07411f3733d9676cb290774f77ec08e", + "chksum_sha256": "71a217479ab14dae72bf6dc83255bfda191cc483d4e5047093889eff62dad65e", "format": 1 }, { "name": "plugins/modules/dms_endpoint.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2198a8a4ec06fbaba11e437960b0df16bab13be4c38e33b15cd6e6285a089092", + "chksum_sha256": "6273acbe3d3265cb9479be2a0ef15e4820567096bfc1239385f701b3913d6af6", "format": 1 }, { "name": "plugins/modules/dms_replication_subnet_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7ae257dab5205b233952cd56ec9bba4dd1d094d8ad5b870349c8f486ae0e611d", + "chksum_sha256": "b992d63836770289a6de831b958b43c22924d8070b58dfa62232b3463af29687", "format": 1 }, { "name": "plugins/modules/dynamodb_table.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9241e566a64c845475cca35cc8cc6ca5074079251056aef3461ea17563cf36b5", + "chksum_sha256": "58334de020e1e49cf8e2837f3c6d4c24ec0ce4e6f99d53e7da5fbb0076ab5cda", "format": 1 }, { "name": "plugins/modules/dynamodb_ttl.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a73cec1ba6db8872e212c217a4426179d80c053db8cb2230e79fef32c20eb2aa", + "chksum_sha256": "9a01c932f7e26e50d742fa3dfb77170c977c629f81f5020eb2299434f5770d4b", "format": 1 }, { "name": "plugins/modules/ec2_ami_copy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d6e4e1ab4a4490cf18a7dcd59ef085ca5e57d6c1691bb1ca4db695566bf23f05", + "chksum_sha256": "49feb92f4f3fe0ac592a4ca5f0f297c1b921ba5937f33f5ab73c3804b0861aaa", + "format": 1 + }, + { + "name": "plugins/modules/ec2_carrier_gateway.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "468bf4e88db6fbe32e01a595a5de331484aa9a441e4ce760bb954108ea0f1a3f", + "format": 1 + }, + { + "name": "plugins/modules/ec2_carrier_gateway_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92a3475cb1fdd5193f5095ab7e79df0fe25f0810401d1a60240fa24c7c706bdb", "format": 1 }, { "name": "plugins/modules/ec2_customer_gateway.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "16c90f2ab42b5f95ed709dbcc96e9d2a1c8b1a02a372503227b17430066a620f", + "chksum_sha256": "f2b1486f2bcc3792d276b35b8efd32b430c254a25914e41a93e6a86d21678e17", "format": 1 }, { "name": "plugins/modules/ec2_customer_gateway_info.py", "ftype": "file", "chksum_type": "sha256", - 
"chksum_sha256": "b7913db7fd5e857f1006ddc5026b732b5d1bd3ad1ad3926020289c4ed80e00f8", + "chksum_sha256": "d99df0970aad4616bc7aece745d143b82963c824e1d0e0ef2b0ae4aaa83e249d", "format": 1 }, { "name": "plugins/modules/ec2_launch_template.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1bfc9cb0770930cf7212d81bffbb85369dca777fdece5d242aaa134081bb8fd3", + "chksum_sha256": "a67cd95fb55350fd82238b7a4a96495a14c3a580faed5e5081fa63cc8288c4e3", "format": 1 }, { "name": "plugins/modules/ec2_placement_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f7e67cb1d68e1b55fcb26c6269dcd772021d9c7fc8c8c3b38f90cd44f42bbfb2", + "chksum_sha256": "74f9cd9cf6ecd68848380dd936c44f27327b65185ed39aa4f1ca31696bb086ef", "format": 1 }, { "name": "plugins/modules/ec2_placement_group_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4fce70bf0edd4edf8330cef39a07550d1587c583031745e30c51fcb6224c87ea", + "chksum_sha256": "c2659207293862dfc5e976966f59c90abc46a13cfa2c48643df4296f5287dcf1", "format": 1 }, { "name": "plugins/modules/ec2_snapshot_copy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "72d2274835255197be1fd94547f938f0e5fc7e0441f1f3446aad68573c3eee43", + "chksum_sha256": "6a6ed898152fac04ef40b39ca0f8003aadeee92418b96584eb6d1a10c28f5753", "format": 1 }, { "name": "plugins/modules/ec2_transit_gateway.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d7b028f8679916db50ca340f7f48d2b8504bb12d107dbd88b6d1f466f28e776f", + "chksum_sha256": "1cedc060e15ab793f1c5e2878b948d1803cac0bdff9b682cad66d6d6de3e1007", "format": 1 }, { "name": "plugins/modules/ec2_transit_gateway_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "633a9320655e039e22bbb917c0e0ff5c486aec9e9196e134284931d1731accda", + "chksum_sha256": "9ca39e62d82cf9008706a2c99d3de1f0f8e4302726538cb92e73ec740d673416", "format": 1 }, { "name": "plugins/modules/ec2_transit_gateway_vpc_attachment.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3650b1ea6a795cc7d93d07c6ce95b62173070583f6634202d43b144804cbe0c1", + "chksum_sha256": "e6d5968564159c3c27dc3f1c491e6a0d045950742ef1bcafcb23b9276430a1f9", "format": 1 }, { "name": "plugins/modules/ec2_transit_gateway_vpc_attachment_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a688b6a8d2d277fcb4a1656b2fef30f5566fa565e1ca14862245036bc5c1d17e", + "chksum_sha256": "30d255db13b600aadbe0d23bf1ba8bddbe46dad80b19c725b7f49db3e3703ae4", "format": 1 }, { "name": "plugins/modules/ec2_vpc_egress_igw.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "003c1def5e498a76b0ea37d993f6a2777e0a6994c799190493181acc185990b5", + "chksum_sha256": "d21b72b863731cb5d7a612bfc7d86f952ea53b467f256b33ced2cd078d594699", "format": 1 }, { "name": "plugins/modules/ec2_vpc_nacl.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6e5eb11d4978dc5e84e91b1c3da4e8c77d0f1bb232be469d12d7937cea044926", + "chksum_sha256": "5e82dac9601cde049fd2399419a723f08cd9d048942a824a22640eda2c49f69c", "format": 1 }, { "name": "plugins/modules/ec2_vpc_nacl_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4dab5c8cdee41ee934141b817bc153b918d0dcb724bcb9b786c3ffc7cd49dfb9", + "chksum_sha256": "e92a9df84a5835644da2e2e8873c52210eb9ea8cd87a8955813469101590ee09", "format": 1 }, { "name": "plugins/modules/ec2_vpc_peer.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f0468a25317d2dc9b77633e5dcb4b189e5b11e6c8a620b7851d78c0be7da9cbe", + "chksum_sha256": 
"7dd064fc9733f8e75d1d20abe1b70a21a90d5b2d33bc162da2471fdd68f14062", "format": 1 }, { "name": "plugins/modules/ec2_vpc_peering_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8d7b6bbbedd3a4ec822e1a54a5a47a2f17144a901550e13d5b25553af8caada2", + "chksum_sha256": "9acf092eb29e6aac666f640dc864478b441817b4597521227ebb7f572a0af76a", "format": 1 }, { "name": "plugins/modules/ec2_vpc_vgw.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2a39adce133df56debffcf0cefcce2f8d597f97346cade741da6e52c1051a752", + "chksum_sha256": "d21bdf9feb57c74235d7ccd0c61e60836ab98a57c9d6aa6f791f5755e51c0e95", "format": 1 }, { "name": "plugins/modules/ec2_vpc_vgw_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "98582700e17ec1b999a4f8597498c38c7afdb0611edb4eddee4f98bc044e9779", + "chksum_sha256": "0dac532670b6f61d14c8727451dc19cd3411ef8439321832fa04bc315474a59e", "format": 1 }, { "name": "plugins/modules/ec2_vpc_vpn.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d6e77f95ccdc077bfaab85340b96e0f8c0ff9eae2e69ac423ac451218178df35", + "chksum_sha256": "8fb57b8a50e58d7dac665f493e678f0f184c3a74535f50daa34d18e19709d4e3", "format": 1 }, { "name": "plugins/modules/ec2_vpc_vpn_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e1f9ddcd0ed828a739a74147769007f8bd8536f64b8b6889dd68da554c94ec68", + "chksum_sha256": "f71d3c29876e7991b4fc9549051a8cb4ecddb1f0b4f0f8c6ef370cae052a43e9", "format": 1 }, { "name": "plugins/modules/ec2_win_password.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "191075a30e6e01642f1d8e0d919e7fd94b5b75ce8381edd6cb7dc689a1a58bf4", + "chksum_sha256": "6edd69b1e1643216dc68ba5970924c87a1e54540f43d1564308f445dff0e290d", "format": 1 }, { "name": "plugins/modules/ecs_attribute.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "262e86caf78bb6c97570acf2760288d1338d9120fd1f2b3bdf4c5d07238958d8", + "chksum_sha256": "6df9cca907a46e2d269b4b7bfc4617311d8cec589137e6f0e9dbff4c671eb8d7", "format": 1 }, { "name": "plugins/modules/ecs_cluster.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d58db0e241d325c56222646603eaec461ce025268fa0b1885229dc3795cc256e", + "chksum_sha256": "717a804b4d98129db71c2dbca9ec78a10c7415e57f8ed3a9811bf114cd7c247d", "format": 1 }, { "name": "plugins/modules/ecs_ecr.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1b1d8972cfc4ec26da49c1c3555f98ca4763f9a36cfe9d34b9ae3fe32d52e8d2", + "chksum_sha256": "46ce9f12421b38bb7f21636c6ce6179948be092fde76276a13aab461603548be", "format": 1 }, { "name": "plugins/modules/ecs_service.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3ec540ff8ef351513945561eac7769dfc0d4b19893b61307545909d016ab9844", + "chksum_sha256": "ffb7dd1444fdbaa495d7429b4aaeed4ac117cf04a54430b33493b28b7d6da389", "format": 1 }, { "name": "plugins/modules/ecs_service_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3e7c77cb27248d5e5feeb0a3ca66523724825c674b2b260d878c8c7b0943e575", + "chksum_sha256": "a470e906c2772bc767b7ec43d5b75f242fe4395afe3502391f9b09fc6fed5d0e", "format": 1 }, { "name": "plugins/modules/ecs_tag.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d2c954dd57cc5e2f55281775b476d2bda6920e9ac3ef404c1c87dac1ae603b64", + "chksum_sha256": "09ded4d5c5c8ab5c19e451c6bbb65fed4a3337a0771c6adbdb9007a2fe092074", "format": 1 }, { "name": "plugins/modules/ecs_task.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"2f63242a5a0c0f7a5e4cff7d0dc3fc92595275673cecf1d58100aa186339c9d7", + "chksum_sha256": "fa98b35b6b771225e6909bbf5fea572662c2500e2954895bc046be910de8ca14", "format": 1 }, { "name": "plugins/modules/ecs_taskdefinition.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "293ca0b43185b449f8fae49050fb7000e0843a9965aeba797ec0cf2a839a555d", + "chksum_sha256": "e62d9a1a1891ba8338158a7d043f79a112f35e27a58677f2f8f77ec1b8fc0d07", "format": 1 }, { "name": "plugins/modules/ecs_taskdefinition_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dd11a4a67918661799b119bc3f27022a0aaca8aa51ae3e4781676131ee2188d8", + "chksum_sha256": "b118ca5f6b160600641be2f803aa40c0c79e65425a1fc64e8d910469e3395690", "format": 1 }, { "name": "plugins/modules/efs.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "633349c53467a9bfaf28a69273a74f6b303d4deb16687579e3c67a8bec6c03bd", + "chksum_sha256": "b5dc61da6d44ae7f0268f19dbd7556a210fbcf67292e7ec9ef2c5e7818a19de0", "format": 1 }, { "name": "plugins/modules/efs_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "29b2ec0e4fdc9fe25863d4f0507a25096987dddff80a3e6abc50b4c35dbb12fc", + "chksum_sha256": "21f1c2140a6de6868eeca7a6ba307e4e12185a8e90a8d4b0cf826abbac63fb87", "format": 1 }, { "name": "plugins/modules/efs_tag.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3ecb608fd6d662a6fcf73fad6ff5d0396f54e66951bca0ca656bae6cc3aa9fb8", + "chksum_sha256": "0aedd58f21e32af7f0231dae6a756c57a9b72d5237f359918c3fa2278859eafe", "format": 1 }, { "name": "plugins/modules/eks_cluster.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b51753c6da9946a352e513d85b4adb29c8b780c9a13fd60e681bd1c06b35d4e3", + "chksum_sha256": "8005197d778382c7a5fec9af4aca70ab75319b7d182822c6af60ae05aa8e6c99", "format": 1 }, { "name": "plugins/modules/eks_fargate_profile.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "65f4e019649d750270d7becd94e01c22228e0fb0b33e21d51b00733956b25c40", + "chksum_sha256": "4b794580bbfea3629ba81541ea598e26431abd6a624059599ef597f5540dbf5f", "format": 1 }, { "name": "plugins/modules/eks_nodegroup.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e2a98d3f03802481c7362df750f99cf4fb66b7ae7004cb40c0f72866f4412488", + "chksum_sha256": "580cbdc0cd5492631137a3bcb4e4e9292ada3498645ffc2bcfe9dec70e1512d9", "format": 1 }, { "name": "plugins/modules/elasticache.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4a92f641d9542c97f9403cf019a627001d59a36c114412a1d2e43f01f28cd692", + "chksum_sha256": "57178a9afa917545ac9b31b025f60e3d3c8b1a9d0952c06b95705133aaa3f69a", "format": 1 }, { "name": "plugins/modules/elasticache_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7d0485e71308c153a5d60e0593dd72d529418b1feb82df839535d86939de768e", + "chksum_sha256": "0c3d2bc62db937a6efce1ac23ea08f6a485e550067df473cd9e01f964ff8c6c5", "format": 1 }, { "name": "plugins/modules/elasticache_parameter_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f59dbbe07caaade07614201b023f634c824e4e41b0a5b828f04ce29e54b25a3b", + "chksum_sha256": "93117fb43d0b04c2d6cb90ae954561f1fb448e6344b4e1ccc6cafca43d31e1d4", "format": 1 }, { "name": "plugins/modules/elasticache_snapshot.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c08013e0a43c183c75f298adcd0927353f2f6be8a037edf7343b30bdce7d6a05", + "chksum_sha256": "c7b626d1406c64f8c97cdf23435f49954bad0acd20965adfcc4e25ca20717820", "format": 1 }, { "name": 
"plugins/modules/elasticache_subnet_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9fee44274b2290b2d3e819b9584c5328b683e6219e2f579fa86b6ddab20a2a38", + "chksum_sha256": "29c531f22d8ac0bd082e4f1f1a6a380b2ca7cc501be261fefc55986b22fda859", "format": 1 }, { "name": "plugins/modules/elasticbeanstalk_app.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6e57e46d717b5d41503a84f14dcaa7ed4d7c9eb38aaee0534372654748f4983f", + "chksum_sha256": "5823a638f54cb2a502ff69ed71cb198becae490f08842bda61cbd430409c2416", "format": 1 }, { "name": "plugins/modules/elb_classic_lb_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0b072c0d4fa0acbd980cc1e6a47a75e1f30ad5de67201676d1c253695cde53db", + "chksum_sha256": "cb867e0ae455259e39258d4b2364633d251a2fdbcc63850e4777281f453711e8", "format": 1 }, { "name": "plugins/modules/elb_instance.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e05c316f9cd70fd9558e646458a57acce9c9bc94b9884b31049b7ddc727fff7e", + "chksum_sha256": "bdf03ed3b98544a4261706d40b91f8b53da1a049afb39a09a9037de9c6d0efa3", "format": 1 }, { "name": "plugins/modules/elb_network_lb.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "280a8f2cf1c26e031af14c990786e8c0bed286c69a643079831f622811f64cae", + "chksum_sha256": "ae3cf0fe9f7072739bf7828b89dce1586b1e042a064c7aa7908e0db68e1e3077", "format": 1 }, { "name": "plugins/modules/elb_target.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cbd59326beb6d572dfe319b14de9ad8fda0e68046f07910f8f5f3ee66aa5069d", + "chksum_sha256": "c8c5a6ff94ad9933d326e71d052cd5cdbfc573f9ca7696d3698ee649d362f424", "format": 1 }, { "name": "plugins/modules/elb_target_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "21a57c01e9d05f78d8e7f5ba1ab267656398d8a1b59efcec02c99abb3013cab7", + "chksum_sha256": "d7355cd7e58646b7bdd84748009b1c313e8754fb8fe0f0e8adc0696c5d830e59", "format": 1 }, { "name": "plugins/modules/elb_target_group_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "87d4087d2d77796f59ffacfb6e10e5c6842e6a2599f3d613e04557865be9d77f", + "chksum_sha256": "f889bad86ff6af11220a83bf3a40d9542429388f1b018ba8261afa52a131eb1c", "format": 1 }, { "name": "plugins/modules/elb_target_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "80bda0b10537466479746e5d11203e41c062f19019a5dcdce442cb2f3aeba27d", + "chksum_sha256": "956ba9cc26be1c3608ceb3539126e8e19d82096bd4564523e21e0c3067297ae4", "format": 1 }, { "name": "plugins/modules/glue_connection.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d2c681d7553cbeac81f5b98c0f6e2085bb030ff27ead1f7e8352af0ed4d8b324", + "chksum_sha256": "8808e4e2e90365eb34abd7d5496058406abee0e8a7f83241b88ddeeaab66c316", "format": 1 }, { "name": "plugins/modules/glue_crawler.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0ecc3c19d2fa2134ec5d73425a2719893a1058c834e2975da8558b84b72112a9", + "chksum_sha256": "ee0f7868f918a610252e59706b1efd929aa089a9bd6c8822cdd0a41e18692714", "format": 1 }, { "name": "plugins/modules/glue_job.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7b3917b5eefd2d8d3efba6cf99eed9a3000c7b20494dff76ddfa46a70d022054", + "chksum_sha256": "b9d5d95b21a49c62e1f86bd8f5e30ab45a9ec0bc81dc680a5f3233e712833108", "format": 1 }, { - "name": "plugins/modules/iam_access_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a4670292f901959f4985fbdd702d127c708717b3b9e3bbfc8122e2829633f5c9", - "format": 1 - }, - { - 
"name": "plugins/modules/iam_access_key_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2769addeade637b0bf8cfcfc25129df28253e939746f433afbb63e3773b998f6", - "format": 1 - }, - { - "name": "plugins/modules/iam_group.py", + "name": "plugins/modules/iam_saml_federation.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "427c30b17fca1d67adec7b8dd6cdf28835062e35e4ea43372295602b86c11fe7", + "chksum_sha256": "4bd0088d72d37180886f21da3542c8ce7949b83677d3f53523bc019d4dd2d658", "format": 1 }, { - "name": "plugins/modules/iam_managed_policy.py", + "name": "plugins/modules/iam_server_certificate.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "192a68f51deba569835389181460a12f83242b6d2a542a6a914cec99b626dfbe", + "chksum_sha256": "d38e7e31661fb0aee3c42942573f4c8a98df6bd9bf570073351b264e93cce3c6", "format": 1 }, { - "name": "plugins/modules/iam_mfa_device_info.py", + "name": "plugins/modules/iam_server_certificate_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "715af8b066c34ff069d73618990712b04b5d780ca6ba5370a9aa92aa3be51a02", + "chksum_sha256": "ed7406a4ffb74582e2d4eecd786ed7cb2ea5c84ad4cb4c15e4158c2d66898396", "format": 1 }, { - "name": "plugins/modules/iam_password_policy.py", + "name": "plugins/modules/inspector_target.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8f33138009d8bf7e8e11d81cd4d088b82b239632efb8b37c2bcc50b63bf6383b", + "chksum_sha256": "dd8d72824e24dae8cd1d95fff482a2a1868766e5436837ea3e71469c8bf7bf99", "format": 1 }, { - "name": "plugins/modules/iam_role.py", + "name": "plugins/modules/kinesis_stream.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1ce190e56a817cc3541c9cf5e68c6e9a271b8590511193fe901702b63f29bdb5", + "chksum_sha256": "a2aab3b2655fe8b79fb874a51657e92705bc51c3ae2a468b82f36cf795e26bcf", "format": 1 }, { - "name": "plugins/modules/iam_role_info.py", + "name": "plugins/modules/lightsail.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c47138c3118c33bb62c8471078ff86990127c057cabe04dbb0e6a20bdf5b53a1", + "chksum_sha256": "bd3d2daf8cdd5c2f048dc06e76bef128d83825afc47db990ed5779007712147f", "format": 1 }, { - "name": "plugins/modules/iam_saml_federation.py", + "name": "plugins/modules/lightsail_snapshot.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "844e900f889075e6928a6f8efd1d0641ca6540b14a2bf77e14599de99f6d80aa", + "chksum_sha256": "1f9ee3c0be77be010e91bebbdc6634eacb5b02f49e580d8177104eec475caf91", "format": 1 }, { - "name": "plugins/modules/iam_server_certificate.py", + "name": "plugins/modules/lightsail_static_ip.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "49b7f82a61b94c864c2bb6468f4c641eadaca1f0f2f604d05a209bd15334e9f6", + "chksum_sha256": "fd86fba01655efa861a4edbb4b68c682ee2c1cb6c52a4927ff2c1deb1cf73320", "format": 1 }, { - "name": "plugins/modules/iam_server_certificate_info.py", + "name": "plugins/modules/mq_broker.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e3b0ee06988c8f0cd2fa0af8cd1b388f9988e104f358120ea6d34d5f5cacefee", + "chksum_sha256": "ca46c2156ab5ab7de5f23540026bef89b31ff890fabda448605be028ca6c389f", "format": 1 }, { - "name": "plugins/modules/inspector_target.py", + "name": "plugins/modules/mq_broker_config.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b4f8434fd2dd9ae678ec010918fb882ce8b6dcb17ef8f1c6ed7cfb4c6cb9ae6c", + "chksum_sha256": "2728e73959eaa5af6f40932e378db4234aa7de989d3fb06689c203502d59b571", "format": 1 }, { - "name": 
"plugins/modules/kinesis_stream.py", + "name": "plugins/modules/mq_broker_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "84d1c2f25c82b967fb44edd2a73ad5436c3ae5db80e22feb3979787da7046527", + "chksum_sha256": "fe23c22370cdf0efc2b68cfad9eed17beb1c0f1d2870b25bd379ce149037873d", "format": 1 }, { - "name": "plugins/modules/lightsail.py", + "name": "plugins/modules/mq_user.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "847ea99d4c77c51a5bb53c2295bb12c0bf8cf3a421d285b73775d4dfb96d8654", + "chksum_sha256": "32a171b4465cc24b3f9b0636e28cb7a791c2685e00dca790ec4d9966bd552c8e", "format": 1 }, { - "name": "plugins/modules/lightsail_static_ip.py", + "name": "plugins/modules/mq_user_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "697c2762936ea292d23252a495835f3d713e0ce37d9f1fe6f47724ed10388ed5", + "chksum_sha256": "f579eb5e29fe7fa20530d9637f5cd18dcf65b09a7a113055480538468c9d0087", "format": 1 }, { "name": "plugins/modules/msk_cluster.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6754af908ab38fafdea2c2de23e5cf682d579f09ffce712cd9232c7b5a86fa52", + "chksum_sha256": "c4211d04238671d5f57b0214e55d12e541c9619b6275c120868019fd3eaf56b4", "format": 1 }, { "name": "plugins/modules/msk_config.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5b2cad825b29977b0f53cb494923b165a264a2274fd750bab6d53bbf642440c1", + "chksum_sha256": "c3653136affcfcdeb7521aff5b9abe8413d864b23c4d0aa923e951cfe4599d7a", "format": 1 }, { "name": "plugins/modules/networkfirewall.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4e754a0658b8f53307b5bf41d5a20616d92a09aaf59928d7482073bc0808de3d", + "chksum_sha256": "b03f192eb185bde65bf5cb5774657ad27af7a6ed80458af6fe05b604cf8c7a13", "format": 1 }, { "name": "plugins/modules/networkfirewall_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f842953dff0636d0d5c48d1dfd25ff9a04efbd347a77e778fe358abbf901f859", + "chksum_sha256": "6de2084632bbed197a8022c9825a8268540c8aa4614ae791c27a4157c6db8328", "format": 1 }, { "name": "plugins/modules/networkfirewall_policy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ef715b14aa6936b4a41ab9ac5d45f521a6a2a5bcc29282da38651ebd9320d22b", + "chksum_sha256": "b313db78d1bed198b586c3b45dcaa6fff480a92dfa6c97235aa2ea8453f1dd23", "format": 1 }, { "name": "plugins/modules/networkfirewall_policy_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "374651dee9fa00d661c269e273234c76b687445f2a5b96b00794a008f49b8ccb", + "chksum_sha256": "3bed6422a18ece0f215a4e13a26d1e6c78498edc8ae70320a9f33acfb61f276c", "format": 1 }, { "name": "plugins/modules/networkfirewall_rule_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "286e657c966f030c981d8c70fa8736d864cb300c97607348a98e8ced67c4b24e", + "chksum_sha256": "b9a890d65f42e53df5e09289a0c34b71a66e2e2975189a45b8b22500d90d4de3", "format": 1 }, { "name": "plugins/modules/networkfirewall_rule_group_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9c47f94128c9a55a80c1b28d8919c121f5b501a62e538fb136f7cfc1398765ca", + "chksum_sha256": "6fabc2b367796a16b091b8a98527ef4d163bae7183a79d80671d471a110fb463", "format": 1 }, { "name": "plugins/modules/opensearch.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3f8ca1c05aacb93678b956ac5afd1bd178fbd682376d00ea338a54e2c9573195", + "chksum_sha256": "59e5149a92d69f805d44e02ae2453a61575d6a09e7c329351852ebb6ac349b50", "format": 1 }, { "name": 
"plugins/modules/opensearch_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fa9ed71b95685a580e2433cbcf08d36a1abd0fa660409719fb6bf6e39a9bda2d", + "chksum_sha256": "8a414409b642f1ec6793d7f6872e8420f6bfdf083c4db1e5d6cf92b3dd3cf83a", "format": 1 }, { "name": "plugins/modules/redshift.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "30b250cec0c633624dd0e6b80adb6b1ac74d6e3f38e53200fc802dfeb408689b", + "chksum_sha256": "19316f40735b597e92399f19aeca9136c4a29d5bf6a4e60e3abd4e003c388a7a", "format": 1 }, { "name": "plugins/modules/redshift_cross_region_snapshots.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "78352756b9b4972fff14df17afe6c670f15900bea01390cb9238f07e68cbe888", + "chksum_sha256": "0144f93c383d3266fbd00dd988fb08903a61b9da0ac84f471750354e7c18a276", "format": 1 }, { "name": "plugins/modules/redshift_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f21b66e80da414866d2495fc0097eb5704c9f0f4c721e73fc421278d4f9226ca", + "chksum_sha256": "852e12c141e900c634b9b9eab98cbc83eab21b5859df4a564fae67402c00be70", "format": 1 }, { "name": "plugins/modules/redshift_subnet_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6e462b88c3da004840ba167cfb2cae72535842c57195c47374df2ab844d8579d", + "chksum_sha256": "b7e0ff95fdb0694d66bb4dc5acd5a00bf37fec964f23cade4fa12a9315e45ad5", "format": 1 }, { - "name": "plugins/modules/s3_bucket_info.py", + "name": "plugins/modules/route53_wait.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "992dbcbb48a88c14263e36288ba89b1a9757b6f0df7897d69cff20f2efead3a8", + "chksum_sha256": "f0fc29354954f2c99912b7f4c97f6d972bdd2f3cabc4add36357b5a147d503ce", "format": 1 }, { "name": "plugins/modules/s3_bucket_notification.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7e7275ca49bc005e0c278f2f475288b86a60a51afd1aae7df9a71a812d2bfbad", + "chksum_sha256": "3bffcf152108c5c10f946c796ff2318a389d75966de354cb5fcaf4be443e5aa8", "format": 1 }, { "name": "plugins/modules/s3_cors.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1968a322226e5d79cfc5fafb7f9bc49c579b6651ed82e990144af8d262401a08", + "chksum_sha256": "043a00712d2e58c116fdafd688121de5ef609cdfd44c957d630bc70cbe065241", "format": 1 }, { "name": "plugins/modules/s3_lifecycle.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cb57b02322198c34915fa3eedf668526cfa3b3b8cad411867fec0a3bb4a7ef21", + "chksum_sha256": "0fe546391c85523a7aa6bdb46e8ed8153f0fd469e6d72de402803821a2d5b97f", "format": 1 }, { "name": "plugins/modules/s3_logging.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "85235617fea255d699380fb5cb49be9eb6325584d1cb1104e17f0261b0af431b", + "chksum_sha256": "776d4b3884a2f605a55171d0d8538f42afd9906a8f4b360fe878e02d79a6ebc5", "format": 1 }, { "name": "plugins/modules/s3_metrics_configuration.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e813b8c6bf6ae1a2a3bf04731839303c4953d7e699875d0a211995f45607a9bf", + "chksum_sha256": "44628adf7b33f58899a690b486522d308bfa6c6dc3ed0deda30934310bb57a9a", "format": 1 }, { "name": "plugins/modules/s3_sync.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d3bd7cc04a7c7676c797601d1478d826fca715de0e41157dfeb21440f64b8d6b", + "chksum_sha256": "ae070b5854145ff8778654e54b571adbc41870e6a4d3c7dfacde1b7e56a8d8a8", "format": 1 }, { "name": "plugins/modules/s3_website.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"86dd4795fb1fc5e4fcd4592804a69d7e65ec78f7211eb0f5eb28e001a97876ec", + "chksum_sha256": "e4d5f6c4b2dd20e0c540d9047f8149cfc6f0453b72b2d56ef3e4d463f553b9be", "format": 1 }, { "name": "plugins/modules/secretsmanager_secret.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "00ea602c8aea327d56a5d2d2df12e69f4d804648b6311a68805414f90b69080e", + "chksum_sha256": "08b16b0a2c0493cf7a42667ab08bd83cd6008a0fda034302f71a43bcf80410e1", "format": 1 }, { "name": "plugins/modules/ses_identity.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9af2d01ebebd7862d7b91fba1688e4229b2370df93ff7b4c5e3e0749cf510c49", + "chksum_sha256": "4e2f56bd12a23aa3eb8657902bed9127955428cf37064323be05047814aead91", "format": 1 }, { "name": "plugins/modules/ses_identity_policy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "db543c3c458ebb3b8f2d16cbf53cc1db41b522e8990e4ad20b7357b1e2cab5e0", + "chksum_sha256": "61140f4cba88ca8c7b63a8db7324f18eed51bc8f1426ff5b67ece2cdd1523eb2", "format": 1 }, { "name": "plugins/modules/ses_rule_set.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "52ff6a2cea01348663c7e8f91104652de66026cf2ea5e8aaaf4f9718248047f9", + "chksum_sha256": "3432beed464e6296038ff25baa2e885bbc15ee8a4b592fadc8b1848908147fcb", "format": 1 }, { "name": "plugins/modules/sns.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "552fda976295b4149037110979d8f213ba0cb55f30558fa6ec46c7e2d3243a89", + "chksum_sha256": "e0e6033b806c8bf45156ccc977fe8f3ffaedab36508a6c0d3f8d23b14231e1e7", "format": 1 }, { "name": "plugins/modules/sns_topic.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9ead09ad248206d691b054f022e8d0eb5f89902dd10a605358977c40143928d3", + "chksum_sha256": "6df7188cc1348ad61e0d4ec34ec0ecdbaa357a33752d7c672d61048c6ab110c7", "format": 1 }, { "name": "plugins/modules/sns_topic_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c3b94209a8edd13b09bb89334dac28df5f28798d8e00f3602fab26e36f3facb9", + "chksum_sha256": "766ce9aa35ee04fd5f21abb0762fbf748af725eab43e167459d335049fe51e81", "format": 1 }, { "name": "plugins/modules/sqs_queue.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f9f9925cda9e5a0b7a0ff4a29e33a6509d84fcd139b78cd4cf791a935b4322e3", + "chksum_sha256": "d90b03038bf0623f02f72923548be73871e534d55d87fe01cf7de1050d2487ca", "format": 1 }, { - "name": "plugins/modules/ssm_parameter.py", + "name": "plugins/modules/ssm_inventory_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a8a5e9e5589b7a0b43f01cd74110fce1ce83aa28c6923bc0843910f094d00716", + "chksum_sha256": "89540d1b5ab592090a69bf0da9788c0ae8e18cfa7d618e0184c00da1843ca995", "format": 1 }, { - "name": "plugins/modules/stepfunctions_state_machine.py", + "name": "plugins/modules/ssm_parameter.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e8b54e0d3036b5f54fa6e5be136bde47d7ae6b9d6ae51c570f36a72b61a4eaa5", + "chksum_sha256": "a9ecd97cdc61788613a0377c5828a0bc96692af077f3c1c997fddecd6dded38b", "format": 1 }, { - "name": "plugins/modules/stepfunctions_state_machine_execution.py", + "name": "plugins/modules/stepfunctions_state_machine.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "99db0e45db51162e18b183014bc5facd8902bd6677f9f829e51a45651a3a7e4f", + "chksum_sha256": "6b6eaeaaf6722b4cb2eaee5f03ac2701778e19fcdd3a8f0488198ab7926530d2", "format": 1 }, { - "name": "plugins/modules/storagegateway_info.py", + "name": "plugins/modules/stepfunctions_state_machine_execution.py", "ftype": "file", 
"chksum_type": "sha256", - "chksum_sha256": "6f8a4c49bca4062099f0dee4a4a4f6285f4d65dcb6bd1f6514d1512787eacf02", + "chksum_sha256": "70c62ea742a48da90db3cebd1f750befd2c381be337faf5206a5e8fbe4cb05b4", "format": 1 }, { - "name": "plugins/modules/sts_assume_role.py", + "name": "plugins/modules/storagegateway_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "eaa03efc62a365c4ab2cb5a6d5c76848d5e20463dce1c3bcaeec0cc4d16ec264", + "chksum_sha256": "85b799638af3111e64ba398c7a81e52b4cfaeb450efcf6116be642b9bf1c645e", "format": 1 }, { "name": "plugins/modules/sts_session_token.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d6f6c506bbe4f6a55c65ae37b9d6fde68150121232fcfed130caefa8e8f68fa6", + "chksum_sha256": "3458c1a42dfde7200a8741178aad6a82ebab6e51a179e5b0306db5fb310d3dba", "format": 1 }, { "name": "plugins/modules/waf_condition.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ad44df69bc13238975adfae18771a43d633326f2b010f58591702df7fac37e49", + "chksum_sha256": "5131a929fdad6e9e29a80fb16245405c3fce9ef342f7133a9f5e9a1daa4ffb42", "format": 1 }, { "name": "plugins/modules/waf_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5b1cf5c43d6124f7160e375864c8d1db4f415a151c17982d14c8f264d40250cd", + "chksum_sha256": "8c3f06e42a478cfdaad04b783cd4383e3cbe2cb3fe5074debc8c51f77c2c72db", "format": 1 }, { "name": "plugins/modules/waf_rule.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f4e5a637198188c2dd7319e29a502f3d86c39900495cabe9b8591881d817a489", + "chksum_sha256": "a3d58f6ae7281668698f8c09ed05e995d723ba82dbe7a906d2a9f3d73d34a2c0", "format": 1 }, { "name": "plugins/modules/waf_web_acl.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "303bd8bf3ec0c46a3c63ce60d9df9f576d45d6f976bcaa8a611011943b8da97c", + "chksum_sha256": "0bcd8d012655999abcd84e2aa55f07efd937b14e175e05122d01ecb398c3f834", "format": 1 }, { "name": "plugins/modules/wafv2_ip_set.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7ebb40850cdcc1bf578bcc9b4bd22a7f1152d1062ed5df5ebfcec19c36fa8c63", + "chksum_sha256": "86b4623dd939d290b36e5c1602e51def048838c0e256795827cae2d88de91f6c", "format": 1 }, { "name": "plugins/modules/wafv2_ip_set_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "068e15f4b25b179442e2c6a5e55bd40d10f2e05028d664c6fcad0a562804a8ab", + "chksum_sha256": "374ecab062fe0f58172737e3f65193fa489a11cd61f7cf04f0303b567cf0256b", "format": 1 }, { "name": "plugins/modules/wafv2_resources.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "33775d642a721637605c90ba93e5d1a302de9d35663c7be8ad3f07ffd0899d49", + "chksum_sha256": "02c16135fb21ce18966c1793752390f1cf4d7290dc73461b4432b52f2f3babb7", "format": 1 }, { "name": "plugins/modules/wafv2_resources_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c60bff7b7b868d6f0b603f0269994c8ad3d678d87b6689af0182dd89954e61e8", + "chksum_sha256": "92ddd60fb4eb0d47cb47c4b3b49852b62b31fdd0a259cf1e24a516754707a65e", "format": 1 }, { "name": "plugins/modules/wafv2_rule_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0658b07ef2b67ebaacfb04b28406a20f43839038537c995e3873920799e30c09", + "chksum_sha256": "8198c99f2aabada884cd0ec0290ddb2c8537fe1728db4fad851a5d0004929231", "format": 1 }, { "name": "plugins/modules/wafv2_rule_group_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "911ad9b1ad0ee33dc4d4ff422ee334ad93e8d512b5ceb4c532e1f8b14d3d74dc", + "chksum_sha256": 
"6c72bc5de1a02e96f90489c5e34e6a3c5b56199bbdc8b13bfdab337513d401b1", "format": 1 }, { "name": "plugins/modules/wafv2_web_acl.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "61259cdf2da767a1ac0cc740eb713c4e5c45c358a8e420b0efae76c44c95b869", + "chksum_sha256": "7e7c3e615c736e9293f00b1fa117ab54d29e491947768632e2d94c9cec22b299", "format": 1 }, { "name": "plugins/modules/wafv2_web_acl_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bbfec040c28e6ef02777fb66146e7844b4c47c3aa7ce8950f58a891f4534ba24", + "chksum_sha256": "55e31c9e9d5998433db9dd05e18997148261588d5ed8a3cf074cd03c1bcd23bb", "format": 1 }, { @@ -1460,7 +1551,7 @@ "name": "tests/integration/targets/accessanalyzer_validate_policy_info/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3324b44b193f56e98a3a2e7445000f0f6be1c9ea536ee24bf069f0640ba4a8e4", + "chksum_sha256": "4c7945a42c1169a9555fa71f9e611ef84a0c6d7a0ed622d88fc05a3592d32bf5", "format": 1 }, { @@ -1516,14 +1607,14 @@ "name": "tests/integration/targets/acm_certificate/tasks/full_acm_test.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6436083f5c3ddee3de639eea75ff4d1cd31e9b09e89426784afcf38930caa874", + "chksum_sha256": "96b94947415c47f4e25a1c4fc7d55306818d3d8ca0aee04b569562ca75d82a25", "format": 1 }, { "name": "tests/integration/targets/acm_certificate/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5e46e7b16be4fd32417dc8180ddb449d051accf097ed501b67f0cc00c2517ea9", + "chksum_sha256": "f081b2bd5a77f435ec588d145134c6af52e2b5bd19d5d3cae924d8be5f8d5076", "format": 1 }, { @@ -1541,6 +1632,20 @@ "format": 1 }, { + "name": "tests/integration/targets/api_gateway/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/api_gateway/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3783a6ec2f0c3e19c80e694d98debe68e697ac3662dcd267502175c4f42f6725", + "format": 1 + }, + { "name": "tests/integration/targets/api_gateway/meta", "ftype": "dir", "chksum_type": null, @@ -1562,10 +1667,24 @@ "format": 1 }, { + "name": "tests/integration/targets/api_gateway/tasks/lookup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0627b871a1e95649ba6e6a0b446be1d9af65f375d809bff21e1afd4774292238", + "format": 1 + }, + { "name": "tests/integration/targets/api_gateway/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6bca5f941b24b308bec9c4c0e61948d4e7964fdfe6d995e9796bd7f09612bc3d", + "chksum_sha256": "ed6c215c9cbd954d4ba3b993fde9cea42f0568a9979c90a47436eba98ea9e677", + "format": 1 + }, + { + "name": "tests/integration/targets/api_gateway/tasks/tagging.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "199cd270708ad990b2ee27e08cebdd50fd5ffe1e24394b4ea6db415c87f1b7f5", "format": 1 }, { @@ -1579,7 +1698,7 @@ "name": "tests/integration/targets/api_gateway/templates/minimal-swagger-api.yml.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5713f02e2af48506bdc7c6f98e4cd97bf32964f359d7e1c16155314b65ecf6e2", + "chksum_sha256": "832a1bfb8d582187942658a4a1dca0e20c99272ee581cf6991aca1f0aa112f8f", "format": 1 }, { @@ -1649,7 +1768,7 @@ "name": "tests/integration/targets/api_gateway_domain/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b2200ffad1f5b49dd1dbadec58f4949e0945a7df938ded994e315f3f88cb3b51", + "chksum_sha256": 
"3ed62d2700519e4c2b10b09d36caf775fae12dee42d5890c89c3934ef51621e7", "format": 1 }, { @@ -1705,28 +1824,28 @@ "name": "tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/env_cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "983bd310159995c21c07ad87cf2066c455a50d562277b400c47e7cc3cef942d5", + "chksum_sha256": "9529a0b3a876a0a110c24729fc7665f1726be8c9e1cdfd0ffdf3a1de03490251", "format": 1 }, { "name": "tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/env_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ac545b82e17c3397d0e775ee189b24f8a325e79972c83a270461df9c0cfc58d8", + "chksum_sha256": "627b7cc23d9b082e44dd06f6fe2f14021dddab5cfb1d04c5a94d3640a4c401c4", "format": 1 }, { "name": "tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c0f317084365948900d155d9c1336d4baf90526fffbddd3b996e8579ec8c19b7", + "chksum_sha256": "5fcedb43730fd42a8540d49a159b193934e270f2a33e3ad6db09100f282e04d5", "format": 1 }, { "name": "tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/tests.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "559d9e7f665a6464bdf917e7b25ff41cdd823995d01a8d6bb5376cabdba3ea40", + "chksum_sha256": "d39aa45a691c590577ecd92f93ee155d86f9dd5a7f369d16d89351adb29f30b7", "format": 1 }, { @@ -1782,14 +1901,14 @@ "name": "tests/integration/targets/autoscaling_instance_refresh/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "64a08168d1e07128b4c5d545d984ba4b2c8953eaf1e76e74d36280df7c600753", + "chksum_sha256": "9b2aece1a35caa3b7acb6ee95a3b123f135733dc1f8e59549a2fcb97e80b8d29", "format": 1 }, { "name": "tests/integration/targets/autoscaling_instance_refresh/tasks/refresh_and_cancel_three_times.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6f7f271b0256e2c817776db8d02ca561bd370a5ae78b026533412d3a38519314", + "chksum_sha256": "810aa0ae80975186b86942dd7de03773cb2e054c7d0f5f54b5b4efdec89b19df", "format": 1 }, { @@ -1859,21 +1978,21 @@ "name": "tests/integration/targets/autoscaling_launch_config/tasks/env_cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "554a62c6b081150c0d1e241b860a74e38bcca31e44bec9bc5fee4ef48ed59153", + "chksum_sha256": "d6b904d6a49be157ab02776b7db6fa68090d0d3b4ef0d64a88d09fc071ada2c6", "format": 1 }, { "name": "tests/integration/targets/autoscaling_launch_config/tasks/env_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "61658f73c20b9db76c17cf247fe3f8305f7b0df51ae3a4b41640222b3c2449fc", + "chksum_sha256": "b38a850ad910d17efdabc6056ded17b7bcee26f7e893d86c5957943df8870d80", "format": 1 }, { "name": "tests/integration/targets/autoscaling_launch_config/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c91c2929ca15e7837e8bfe2fef71ddb22c4146f7b82f87bbb427e67b57eda1d2", + "chksum_sha256": "3bc1a5513987d5b5318be598ce6735c461eea652c906eeeab583893dcdfe31b9", "format": 1 }, { @@ -1915,7 +2034,7 @@ "name": "tests/integration/targets/autoscaling_lifecycle_hook/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1c33088718db08689d0331e05f8f62ffba98125ee70cc597b822a2d8abdc2513", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", "format": 1 }, { @@ -1957,28 +2076,28 @@ "name": "tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/create_update_delete.yml", "ftype": 
"file", "chksum_type": "sha256", - "chksum_sha256": "91f97b1be311a231ce16dee2c3d94f683fb75cff327b3c8c730549bd1fa59669", + "chksum_sha256": "cd7f919350f80a211e3f84dfda92a6a539f8d68a852a9513b8248aae04ace3da", "format": 1 }, { "name": "tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/env_cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "589715d4041cae20e7f28b7d4d50d4892eb25446437838408fbd27dbb4ed6d8d", + "chksum_sha256": "9b84714a9f521785a9eb822a24a45acee898f02321e74b12367fe73cc668c63d", "format": 1 }, { "name": "tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/env_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bfedb707d03835770cb2aaf3edfbd97b1472881fd04cfd85559bc3675bf599f8", + "chksum_sha256": "5c74ce79ae8bf5f5fafbd856a0ad3e7582c49d2e1da4c008f5c77ac19a96227f", "format": 1 }, { "name": "tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "54e107dc11e2d7a141046b7933e6e5eeabf0c3860ab856b3f3a74af20e4252a5", + "chksum_sha256": "11e4191a8fb247be972aaf5e8a7b2fbc12e514ef53f1736ca90b11d21785b50e", "format": 1 }, { @@ -1999,7 +2118,7 @@ "name": "tests/integration/targets/autoscaling_lifecycle_hook/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "36ed879c8f6bfe8198b7e23c3ca879aa3280613a09f76e879e65d8d2d934b040", + "chksum_sha256": "365c26c94d84d4e82a379b3a77e69c07e27e7144209cb9a7d54b6b6d71c86cbc", "format": 1 }, { @@ -2055,7 +2174,7 @@ "name": "tests/integration/targets/autoscaling_policy/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fbc367e921406afcd6205835b7ad6c224fb79169221e276e47edae0fca758975", + "chksum_sha256": "16babd09f0f0e1ebca1fd89cee72568a88dfa304319a9733f85c6e999ecd0081", "format": 1 }, { @@ -2111,7 +2230,7 @@ "name": "tests/integration/targets/autoscaling_scheduled_action/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b03e212eaa3df04e3275fe6ef47915daa0c383af25bc88aa2c8cedf2cec8855c", + "chksum_sha256": "753fd24292d2402c3aed13ecc9df608588006afff84d462216487471bb9532c1", "format": 1 }, { @@ -2122,55 +2241,6 @@ "format": 1 }, { - "name": "tests/integration/targets/aws_region_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/aws_region_info/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/aws_region_info/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", - "format": 1 - }, - { - "name": "tests/integration/targets/aws_region_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/aws_region_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e9e08c8cb8631a42b7c94756551b06830cfc35f765f3e16f4dfad800f2197449", - "format": 1 - }, - { - "name": "tests/integration/targets/aws_region_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", - "format": 1 - }, - { - "name": "tests/integration/targets/aws_region_info/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"ce48a8661f558765a481c8bbcc58b9570c97a411f6803851ee0668d3c3f74b28", - "format": 1 - }, - { "name": "tests/integration/targets/cloudformation_exports_info", "ftype": "dir", "chksum_type": null, @@ -2230,7 +2300,7 @@ "name": "tests/integration/targets/cloudformation_exports_info/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cae31f7031f4f2201fa51c73db0fbd74dece2dba3051d22e67b1f9f385897179", + "chksum_sha256": "7818687dbab72fb72a2408a34c1ac6b2ecef45256d614e4858078c0186c242bf", "format": 1 }, { @@ -2293,7 +2363,7 @@ "name": "tests/integration/targets/cloudformation_stack_set/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8bf5d5d7a4a0cccefd6dd03a99b3f07c892ac79eebfab23eda2895905b2e8940", + "chksum_sha256": "4626d159b88503a0fdf49ffccc6367591d5412bd38e967a7b13771f0d937d3eb", "format": 1 }, { @@ -2349,14 +2419,98 @@ "name": "tests/integration/targets/cloudfront_distribution/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "471012cf65610e30d80b1c3fb23237881ab73360a5b644c40eb4f2b73cddc993", + "chksum_sha256": "884fdd74c1d1a3a8c6b47d3865e87b2148784ab6ee43fc69636ed719fcede58f", "format": 1 }, { "name": "tests/integration/targets/cloudfront_distribution/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ca9356675e8b56f45f8139b71097e596da9d951156d673e3adc37f637b230070", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_invalidation", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_invalidation/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_invalidation/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ebbd605a81b0283d658d5307bf361c9d652f0122cb23f4742f7c3d136798878d", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_invalidation/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_invalidation/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af750b27abe783562efcbef32248fc4116dc1a6b704d33632048ebc692fb53f7", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_invalidation/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b58f6ba7516bc781cf36f6245d279062e8ddb6b1c2a6fedb1a07161358d6921", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_origin_access_identity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_origin_access_identity/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_origin_access_identity/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ebbd605a81b0283d658d5307bf361c9d652f0122cb23f4742f7c3d136798878d", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_origin_access_identity/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_origin_access_identity/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "f1531bdf02a3c33a25abd3e9dd6fa7975c71f9a7e999487baadbade0c181ee8d", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudfront_origin_access_identity/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b58f6ba7516bc781cf36f6245d279062e8ddb6b1c2a6fedb1a07161358d6921", "format": 1 }, { @@ -2391,7 +2545,7 @@ "name": "tests/integration/targets/cloudfront_reponse_headers_policy/task/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4d1b4aadb4ef86523e8723e030487ca40b648b6af05537fa80390ddcb0e16fa1", + "chksum_sha256": "1516ab8a89d11cd27b3ff43454262102638c03bf2c313607a47b05d8628c9f74", "format": 1 }, { @@ -2461,21 +2615,21 @@ "name": "tests/integration/targets/codebuild_project/tasks/description.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7084db0a1e42a1ad274b37f780d37683431f36cfe5a42f287fb7e178c6771dd4", + "chksum_sha256": "0c98e0916f712ae85828f9099a0c0f6ff5c385430ba4d7c5877c859a2cfdf999", "format": 1 }, { "name": "tests/integration/targets/codebuild_project/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "885a36655b52cbe6c23dfd89eca74b9adcb2d570a3bafa18a62ca6e3e6d639cd", + "chksum_sha256": "c7d6e6f7629bf2e8c2fbdbbda47625aa310dfc2ae4dbdc74272dec45d4d6c2ba", "format": 1 }, { "name": "tests/integration/targets/codebuild_project/tasks/tagging.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6d4aec1182b71dd3aa574518efacc0f080cadc3f9cb05e3287fc2a070011c996", + "chksum_sha256": "adb5420dc5967ef8464fc00e9b5ea805a8ecac46b72cfadcc3b4ec3a170118e2", "format": 1 }, { @@ -2531,7 +2685,7 @@ "name": "tests/integration/targets/codecommit_repository/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7fb530f4e7c9cfede1195502bf20b5cca69dc6357da43457c6392d42b5e148e1", + "chksum_sha256": "92346dc3c9f5ef48ccec4448e6dbf2d1546b6771461bfd562c97a745f23f6bef", "format": 1 }, { @@ -2601,7 +2755,7 @@ "name": "tests/integration/targets/codepipeline/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e755c6e946c725a23a9457a0999730ae3cd1f7583c449c5405d8c9dd63997d62", + "chksum_sha256": "bb8700530ffe1964ba3da4ff9fd4745630418ee7265d9b47a93eefc21afb5802", "format": 1 }, { @@ -2629,7 +2783,7 @@ "name": "tests/integration/targets/config/defaults/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f94b2d41159b50f2e3d48fb790068af4bc91fa788267f800775ea7fae5573853", + "chksum_sha256": "0ec3fb53dbb1ee15167f5ab44435d598e66ae0cb77f473507d6a1c7dd90841b6", "format": 1 }, { @@ -2671,7 +2825,7 @@ "name": "tests/integration/targets/config/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3362b38bb4d1b1806ea300b5285b3f1ea386e25a96e87ab34eeb4331b1b7ccba", + "chksum_sha256": "2daab797eb69d23bff5a3d30e0f05384d037c650dda9f83f12e3d932be107178", "format": 1 }, { @@ -2682,6 +2836,13 @@ "format": 1 }, { + "name": "tests/integration/targets/config/templates/config-kms-policy.json.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0bb13e44d2bce37023410704a168930a1f3969a444ae587eb4dcbdfa546bb81f", + "format": 1 + }, + { "name": "tests/integration/targets/config/templates/config-s3-policy.json.j2", "ftype": "file", "chksum_type": "sha256", @@ -2717,10 +2878,17 @@ "format": 1 }, { + "name": "tests/integration/targets/connection/test_assume.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "844265d0d1bc33f7c262bc3aa628e130c0833449cf2102a9766167049afb33e0", + "format": 
1 + }, + { "name": "tests/integration/targets/connection/test_connection.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "922f40a2d273045274511b3b426ddfbafbca7193cd72ad22de6719529bfd4d0e", + "chksum_sha256": "9956dba8bd31e66488ee81a932f69a5775367b025ef1e17ed9d85fa75ed04960", "format": 1 }, { @@ -2755,7 +2923,7 @@ "name": "tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "22056e63edccac88c747444d60a990c4af4b548ddb1a86a9d75deb058a0d00aa", + "chksum_sha256": "2b7963e4faeef91fa0375fa47d6defb1c3ac50da36a0b4a6fe7c64569d46749e", "format": 1 }, { @@ -2822,196 +2990,196 @@ "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_cross_region", + "name": "tests/integration/targets/connection_aws_ssm_centos", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_cross_region/meta", + "name": "tests/integration/targets/connection_aws_ssm_centos/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_cross_region/meta/main.yml", + "name": "tests/integration/targets/connection_aws_ssm_centos/meta/main.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "a935dc258dfa740bfab5f6e4a47c4dece05a773cb3177c50f45b58181a9b1e74", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_cross_region/aliases", + "name": "tests/integration/targets/connection_aws_ssm_centos/aliases", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "9cf20049d8039a9fcb954c467ea44b85e2890bf06ebe4d5ec35a62d1d49d7b9e", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_setup.yml", + "name": "tests/integration/targets/connection_aws_ssm_centos/aws_ssm_integration_test_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "074a8d6623529ab13e645a2746b38fa0aa37577eab5d2ffb0ad97ff86d74417d", + "chksum_sha256": "eb4403d62e54790741388975ce54a8c4f431182776545c5416a5226a1e9ee87d", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_teardown.yml", + "name": "tests/integration/targets/connection_aws_ssm_centos/aws_ssm_integration_test_teardown.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "68e17efa882ec51dc465da88117ed7d3bdb822103ca51acd282cce7e2f68dd38", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_cross_region/runme.sh", + "name": "tests/integration/targets/connection_aws_ssm_centos/runme.sh", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "ff344e2c3c7404d48af1e93312e4d47bf935e9a28413a0bbf7c6fd54b74ca900", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3", + "name": "tests/integration/targets/connection_aws_ssm_cross_region", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/meta", + "name": "tests/integration/targets/connection_aws_ssm_cross_region/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/meta/main.yml", + "name": "tests/integration/targets/connection_aws_ssm_cross_region/meta/main.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": 
"a935dc258dfa740bfab5f6e4a47c4dece05a773cb3177c50f45b58181a9b1e74", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/aliases", + "name": "tests/integration/targets/connection_aws_ssm_cross_region/aliases", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "9cf20049d8039a9fcb954c467ea44b85e2890bf06ebe4d5ec35a62d1d49d7b9e", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml", + "name": "tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "592b2da3cacbbbc971e5546738c28906f6d1ed9bba273c791706c69fc017e826", + "chksum_sha256": "8b71ed17b15a333184c83ff9b16b3b6633bad5ec7c6c3ec8e61f104c60d2a63a", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_teardown.yml", + "name": "tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_teardown.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "68e17efa882ec51dc465da88117ed7d3bdb822103ca51acd282cce7e2f68dd38", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/runme.sh", + "name": "tests/integration/targets/connection_aws_ssm_cross_region/runme.sh", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "ff344e2c3c7404d48af1e93312e4d47bf935e9a28413a0bbf7c6fd54b74ca900", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_endpoint", + "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_endpoint/meta", + "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_endpoint/meta/main.yml", + "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/meta/main.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "a935dc258dfa740bfab5f6e4a47c4dece05a773cb3177c50f45b58181a9b1e74", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_endpoint/aliases", + "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/aliases", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "9cf20049d8039a9fcb954c467ea44b85e2890bf06ebe4d5ec35a62d1d49d7b9e", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_setup.yml", + "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8e3892a17060475e23c133887e84451e6c32709892480f99f1e2c7cacd1be05d", + "chksum_sha256": "240cfda6ad757bab0742479862686d5bda600d6a68dd4f2c65f47eb1007672e0", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_teardown.yml", + "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_teardown.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "68e17efa882ec51dc465da88117ed7d3bdb822103ca51acd282cce7e2f68dd38", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_endpoint/runme.sh", + "name": "tests/integration/targets/connection_aws_ssm_encrypted_s3/runme.sh", "ftype": "file", "chksum_type": "sha256", 
"chksum_sha256": "ff344e2c3c7404d48af1e93312e4d47bf935e9a28413a0bbf7c6fd54b74ca900", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_fedora", + "name": "tests/integration/targets/connection_aws_ssm_endpoint", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_fedora/meta", + "name": "tests/integration/targets/connection_aws_ssm_endpoint/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_fedora/meta/main.yml", + "name": "tests/integration/targets/connection_aws_ssm_endpoint/meta/main.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "a935dc258dfa740bfab5f6e4a47c4dece05a773cb3177c50f45b58181a9b1e74", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_fedora/aliases", + "name": "tests/integration/targets/connection_aws_ssm_endpoint/aliases", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "9cf20049d8039a9fcb954c467ea44b85e2890bf06ebe4d5ec35a62d1d49d7b9e", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_fedora/aws_ssm_integration_test_setup.yml", + "name": "tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0bf5b563bfd9f546398b3e7918753a794b20aef0e8d1ff1c96de51401c78ccad", + "chksum_sha256": "93e7d9024988ad93cb9aed3050fccaaac2444ea0432712b597e4a2355ef61f4c", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_fedora/aws_ssm_integration_test_teardown.yml", + "name": "tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_teardown.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "68e17efa882ec51dc465da88117ed7d3bdb822103ca51acd282cce7e2f68dd38", "format": 1 }, { - "name": "tests/integration/targets/connection_aws_ssm_fedora/runme.sh", + "name": "tests/integration/targets/connection_aws_ssm_endpoint/runme.sh", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "ff344e2c3c7404d48af1e93312e4d47bf935e9a28413a0bbf7c6fd54b74ca900", @@ -3049,7 +3217,7 @@ "name": "tests/integration/targets/connection_aws_ssm_profile/aws_ssm_integration_test_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2fbddb4b12de01cb5032a1197f88bbe6d099a0f10bf726fa5ae2ea8800f748bf", + "chksum_sha256": "1a4e1f76cd9bcb4c59113bdc0444626b526371014355e2ce3732919136ed3f1f", "format": 1 }, { @@ -3098,7 +3266,7 @@ "name": "tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e451c4bfa160e675abbc79d9481ef7bbe952f22e64fba32177e4937d11ee8d1d", + "chksum_sha256": "54dd0b9662038c9ff6eb3d077b085fd579f662f0299ec78e62d0102bb697cabc", "format": 1 }, { @@ -3196,7 +3364,7 @@ "name": "tests/integration/targets/connection_aws_ssm_vars/aws_ssm_integration_test_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4908450b802a3d8e314b5f4ff49ad1127b3a496a642ed84581cad03c38c4370b", + "chksum_sha256": "ea5a811a2f45d9e3781e2b9b9fe59911ae86318ce0f299ea514f0353212d86c9", "format": 1 }, { @@ -3238,7 +3406,7 @@ "name": "tests/integration/targets/connection_aws_ssm_windows/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9cf20049d8039a9fcb954c467ea44b85e2890bf06ebe4d5ec35a62d1d49d7b9e", + "chksum_sha256": 
"ad01935111c0178fe8ae2ee5dc08015f1d8a8511e0944896c441537742a0b4f5", "format": 1 }, { @@ -3308,7 +3476,7 @@ "name": "tests/integration/targets/dms_endpoint/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0e86bd37884e4ea99725bd4a7819531a795322d19b500a7cb09cec70e3afea61", + "chksum_sha256": "2ed33a12d9f3e10eee1f5b7f5dd028f39ef988fc3c52faa852d6fb35ba00ce31", "format": 1 }, { @@ -3385,7 +3553,7 @@ "name": "tests/integration/targets/dms_replication_subnet_group/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "20e8c1ea3ac3946c1c0b79223f418d3f1140ea6e55e37baa695fbdf149bb0f0e", + "chksum_sha256": "f9ad7c0b73e03b9f3eb518af151af70f904f2922e1321adbeaccad089974f7e3", "format": 1 }, { @@ -3413,7 +3581,7 @@ "name": "tests/integration/targets/dynamodb_table/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f6100a743768782dbd903d1dfd203aa9a934e58c0e7d57500f50c25763b7906d", + "chksum_sha256": "508a88a997fdcbf17b1251a7d9c89cf49fa8444027fef82e9031ec299e8763d7", "format": 1 }, { @@ -3427,7 +3595,7 @@ "name": "tests/integration/targets/dynamodb_table/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "30812647bdc4e6916e32ca6e0d8e9450e6d96fd7d4cd8696b66a0eadf9bd95e2", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { @@ -3441,21 +3609,84 @@ "name": "tests/integration/targets/dynamodb_table/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ebf5e776f8d7b0cdbf6902b0b055d9447157978123ada7a973d4d9963a57fc9c", + "chksum_sha256": "564f5d43b5e448142ce041267325d9149d1743af5027a7875064248ba656ce22", "format": 1 }, { "name": "tests/integration/targets/dynamodb_table/tasks/test_pay_per_request.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a9f60025548dde76cf817eea7c6e5e4da25dc5b9af756d48096ef3f2f437f886", + "chksum_sha256": "45cf53627344eb31b7bb79bddca9d6e53d9a04a84136c5c2118e4ffd5b3e3694", "format": 1 }, { "name": "tests/integration/targets/dynamodb_table/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6a25dbd24e5726254fa451c380cd6ffb9b21c20a864a01fb47896ab92d0c0945", + "chksum_sha256": "69f6e741c00c835192318ad216acc1aeca5bc6bc928066a3676ebbc457d483a4", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_carrier_gateway", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_carrier_gateway/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_carrier_gateway/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "53741eeeb63ec126b9a7cd1970775b838663d385e84362e05b5a616fe907ef8c", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_carrier_gateway/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_carrier_gateway/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_carrier_gateway/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_carrier_gateway/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"6e9aac0df49a54677a2aba40cd0749e878704833388eea416102ca8990ba7c02", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_carrier_gateway/tasks/tags.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3810e1940f7dcbec8039adb39d983bb1c8d690e0fc4368ca6db1c615fff9a49", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_carrier_gateway/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "868ad2099a29b0f4de834a35f57bb18fa7072a1cc1e065435d2e7c6e322384d2", "format": 1 }, { @@ -3504,7 +3735,7 @@ "name": "tests/integration/targets/ec2_launch_template/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ba5fe384d872545f0fe7608470b5582b10c7dfadd5c45e2bd7fbcf6e71438b57", + "chksum_sha256": "1c33088718db08689d0331e05f8f62ffba98125ee70cc597b822a2d8abdc2513", "format": 1 }, { @@ -3532,14 +3763,14 @@ "name": "tests/integration/targets/ec2_launch_template/tasks/instance-metadata.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c85e9a789d61afbbc9b44bc498c8fd7460f1a889cf2f797d106784cb9d5f6a92", + "chksum_sha256": "c2c2a5a2987708baec107e0516971a21bbb4b580876aacb9a769333bc9c2bc07", "format": 1 }, { "name": "tests/integration/targets/ec2_launch_template/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9099b69e60a867e884055e9848aad0a7bc75e9222bc02927e9479701998206a2", + "chksum_sha256": "c4eee433d04b596c8143d418fa3e93b65b894b930ff6c6bffa194db167d1a3ae", "format": 1 }, { @@ -3553,7 +3784,7 @@ "name": "tests/integration/targets/ec2_launch_template/tasks/tags_and_vpc_settings.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6a4c60154f998d018b6c1c82fc522d8a0d08e38c26554e1e7199e76a1a7a0af5", + "chksum_sha256": "fd1a5eeb9430b66903a192f11b6822236a77d29e47086c46e5d8f785bbfbe7bc", "format": 1 }, { @@ -3616,21 +3847,21 @@ "name": "tests/integration/targets/ec2_placement_group/tasks/env_cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "554a62c6b081150c0d1e241b860a74e38bcca31e44bec9bc5fee4ef48ed59153", + "chksum_sha256": "d6b904d6a49be157ab02776b7db6fa68090d0d3b4ef0d64a88d09fc071ada2c6", "format": 1 }, { "name": "tests/integration/targets/ec2_placement_group/tasks/env_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "61658f73c20b9db76c17cf247fe3f8305f7b0df51ae3a4b41640222b3c2449fc", + "chksum_sha256": "b38a850ad910d17efdabc6056ded17b7bcee26f7e893d86c5957943df8870d80", "format": 1 }, { "name": "tests/integration/targets/ec2_placement_group/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "059a8ef0b4d579012203884b54ca9384fdf1f4104c426fd8bb239228bb59ad78", + "chksum_sha256": "5a2c9200376e712afaf49067b0a6f8051ac5b52df6feb53cf251a7d87caf2d88", "format": 1 }, { @@ -3686,7 +3917,7 @@ "name": "tests/integration/targets/ec2_transit_gateway/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cf12ed96541acd4ed57ca3ddb61abf3f61869c073f66c66c59db3595f3ae9123", + "chksum_sha256": "9d9b2f61f864ccee1f302f93b6ac61aaaa52ec1ed7e96c270540a25958f311c1", "format": 1 }, { @@ -3756,7 +3987,7 @@ "name": "tests/integration/targets/ec2_transit_gateway_vpc_attachment/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "095b2eb8958113887af57fa69d067d5dc652d79579eca75370f3d507470ccbc6", + "chksum_sha256": "ca24ff6b034b97d4ad38bee106733c3c62d239245ccafc89af2ba1d0dcc895ee", "format": 1 }, { @@ -3812,7 +4043,7 @@ "name": 
"tests/integration/targets/ec2_vpc_egress_igw/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "af43e67395e34f85699165403bd325c658be8666f05a477b09d17064a65d586f", + "chksum_sha256": "8cb118c93bcf55328affb7e0e2a4680b544273972b30eed08d8846221fcaf174", "format": 1 }, { @@ -3882,7 +4113,7 @@ "name": "tests/integration/targets/ec2_vpc_nacl/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ffffd97b51783b27d97267c3c1f5c404963e41f96de2cd3e89752b0bbbe001f5", + "chksum_sha256": "345314177dac44a8f9cffa690cea4e9ee7ca571ab90466297612b172b8bfce34", "format": 1 }, { @@ -3959,7 +4190,7 @@ "name": "tests/integration/targets/ec2_vpc_peer/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "28e43fcefd17e48e6496391fb2ad2f71f4f56ca87a187e96be1b10f8c2ea8ff3", + "chksum_sha256": "8e2e5176309859bf2ec19716bf80c79de8b30a2018b47236fa38fbd2b222db9c", "format": 1 }, { @@ -4015,7 +4246,7 @@ "name": "tests/integration/targets/ec2_vpc_vgw/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8d37d14f9b9d93d90c337fe644c2029957a6b929fce7e8b4d547aa9e78196931", + "chksum_sha256": "92e57ee807a32575150a6ff277d698687ad7c17da0ac35a8e57b81c786d2c22c", "format": 1 }, { @@ -4064,7 +4295,7 @@ "name": "tests/integration/targets/ec2_vpc_vpn/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "46d2b0133a0ef966ff701062c6e0dabd69c8756212c6ae067208c4fce73edffb", + "chksum_sha256": "0c65eca77fbdfc44ce24b2a0ef1286f2b54d5f5f90977cd5c1012ed7678bf82b", "format": 1 }, { @@ -4134,7 +4365,7 @@ "name": "tests/integration/targets/ecs_cluster/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "80a8830c4e171ebcf66a9bbb2dccf8e4c6fb39bca5d4077ed0194e2873251340", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { @@ -4148,7 +4379,7 @@ "name": "tests/integration/targets/ecs_cluster/tasks/01_create_requirements.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bb415c8d13620e6c1e16b5fae42ba508a0e3a56508595a019043da97223b5a76", + "chksum_sha256": "efd45aed9d07ce65678fe5fd85bad06e52d8330e8f7b1384a044a8848d1e6285", "format": 1 }, { @@ -4162,21 +4393,21 @@ "name": "tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2a01b7482a53eef389f3099b34476c7a79350c505b494310254f5e8b6fa72893", + "chksum_sha256": "e7c2cef87ceeb6202914dd1e3afab3d965bc3309037e08f0e96bfbd273583674", "format": 1 }, { "name": "tests/integration/targets/ecs_cluster/tasks/99_terminate_everything.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "490ae3411eb43eb65a8bc9f68250a00ce9bb9fe9be951ad943e4ccb7e2b77404", + "chksum_sha256": "a93853097a3c134e3b08d8498a5a6b9218d12c403a4ff4aab3964c88db6b0365", "format": 1 }, { "name": "tests/integration/targets/ecs_cluster/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "af10b2d8ff5befbe177d1a28c9663a9ce186e51033d270674b1d8558835b836b", + "chksum_sha256": "8b0d543a58d4adc01b5ec515c3c3974b7e520b0ccfc2cc22b7ab3c5ecd4e698f", "format": 1 }, { @@ -4232,7 +4463,7 @@ "name": "tests/integration/targets/ecs_ecr/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "08d279ab610a6d73302bcf40e5e6cccedbe5886d2dd603a54ba1ecb7e2fcdd4d", + "chksum_sha256": "2d617767cd0a8218bd504d9f1d76c56158b5f3aea48de96b8bdc73db6e3ee477", "format": 1 }, { @@ -4288,7 +4519,7 @@ "name": 
"tests/integration/targets/ecs_tag/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3c7b629c0ade54768336a2489a1f4818b261fb068fcfde43e1136114135fe424", + "chksum_sha256": "fa309fb0e7e8d61f85a6d0e65aa1939a00c2d45d6421cfb77a0f405d355dbc9f", "format": 1 }, { @@ -4344,7 +4575,7 @@ "name": "tests/integration/targets/efs/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "29aacf6bd6dcbaae5975d09e89044b9961f7592138a0eb99179e1702761c1e8c", + "chksum_sha256": "ea0c07cb022eeea0f23dad750aa26861e199f64e5fd605562595ab08c3efcfa5", "format": 1 }, { @@ -4414,14 +4645,14 @@ "name": "tests/integration/targets/eks_cluster/tasks/full_test.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "afcebd96f550de9993c2e416130c2f601f3e1b012d4448d146aaa87fdc64482d", + "chksum_sha256": "4eb39903b378d06cb7f278a0df8f58d36a168a97a412407d3ea73e299c37d1ad", "format": 1 }, { "name": "tests/integration/targets/eks_cluster/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "914f612a4166d146c9ff3224f3289f6e5ae42f2d925d2be14351836582b329d8", + "chksum_sha256": "bd552efddedf35e2323431cd159bf009a60d4115a4c02073938bb4bb362d9770", "format": 1 }, { @@ -4498,14 +4729,14 @@ "name": "tests/integration/targets/eks_fargate_profile/tasks/cleanup_eks_cluster.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2ac41fa3c2fdd78753733a08ebf36d6846e08d16ecb8299d8fbf9d4e85d12cba", + "chksum_sha256": "0f41043002e78a5bb1aeeab852b73cdf34339ee9b98414b82ba86bb8cf6540ef", "format": 1 }, { "name": "tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1ddf80999c0a9aba256cf821ebc12d3177e7977f35a8a75c688d98cb13a36219", + "chksum_sha256": "568f82698281c509f6b951355dc20ccd4e119533a10a53c0fc8b3d24887a599c", "format": 1 }, { @@ -4519,7 +4750,7 @@ "name": "tests/integration/targets/eks_fargate_profile/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fce8e75016b18cec52e460b6254c7a83210637e99ec512438ca1ea46172a6432", + "chksum_sha256": "3fe610d444fbca34ba4211c95b5bdac3a892a893345b893e839908f2a605f5b9", "format": 1 }, { @@ -4582,35 +4813,35 @@ "name": "tests/integration/targets/eks_nodegroup/tasks/cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "38057bbd60926be52ca44f37b397c6d27d42a5ff1c7c2b92c1166161e58ec656", + "chksum_sha256": "e4a514b77594c21ce75f89b65ed0c2a534feae994d09523ef4039fe503f0a5d6", "format": 1 }, { "name": "tests/integration/targets/eks_nodegroup/tasks/dependecies.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bae338ee4b13aad2cfa9ac2fc328a8166729b8ac6e76c8121b27e20741ada6b5", + "chksum_sha256": "a878bdf097055ba6a0007ec40d00ec9371c99f1a306eea6e8f3b87b06f9ee7dc", "format": 1 }, { "name": "tests/integration/targets/eks_nodegroup/tasks/full_test.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7ebc60cd509bdce61d904ea0e77b02ea84e83577d5bd918e1571c8ba7c84bca5", + "chksum_sha256": "e82734eafe0d1922e57f535fcf82ddb27b485d2bf21e14814f569a622501b0ef", "format": 1 }, { "name": "tests/integration/targets/eks_nodegroup/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d337eb087d1ec3b2a1ce9a1d21e1ff8244b21fcef9080225a1e3b9725e8a1062", + "chksum_sha256": "022e78f68b60306da54d7c785bcd0ed38645595aa20bc1dd6666b62c173f34a3", "format": 1 }, { "name": "tests/integration/targets/eks_nodegroup/aliases", "ftype": "file", "chksum_type": "sha256", - 
"chksum_sha256": "f9fe17586a25ccaae77eb0ed119358964b244bef37696d37152ef3964a0a7afe", + "chksum_sha256": "aab55ff735b17633ec85d2050fea115ef0082e8246cba061964b335a60765b93", "format": 1 }, { @@ -4659,7 +4890,7 @@ "name": "tests/integration/targets/elasticache/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f9c9dcb2e847ee6ed349b3f28bd95b7b20f04b5f5862f391e9d49c228bfe89b8", + "chksum_sha256": "ab4eb1547afcaf928245196b661323b9a4c63ec76fa441873a38719e619be7bd", "format": 1 }, { @@ -4715,7 +4946,7 @@ "name": "tests/integration/targets/elasticache_subnet_group/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fc6760c7c485b93e3c094e633b832d0de8be11ec7e69157b86c365021f5aa373", + "chksum_sha256": "4f30e4a5c9eb2d3e14282787e8894c4524fac7705ee1c72120fe38fd473c83a5", "format": 1 }, { @@ -4771,7 +5002,7 @@ "name": "tests/integration/targets/elasticbeanstalk_app/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "49e6ec472ac39b7d6aff31a7f0f2a0d60aee79639c31c49a27c24388551e7287", + "chksum_sha256": "1c2e07d35a32ad5778559d018ea20aca13071ef7bb242fbe8ee059258755d068", "format": 1 }, { @@ -4827,7 +5058,7 @@ "name": "tests/integration/targets/elb_classic_lb_info/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4db2eddb0c919f943ab6540d90e7c44515dea128ccf63baf59692cc88ac3fad8", + "chksum_sha256": "fb42cd42fd956ad9097e38d06432eabaedb9ad38698ce49ed592ce68bc7ce382", "format": 1 }, { @@ -4904,28 +5135,28 @@ "name": "tests/integration/targets/elb_instance/tasks/cleanup_instances.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ed7b62c33be962daf2d99ecee1b0a27647d9f3100b53c8d0a5393a35ff4623b3", + "chksum_sha256": "7e74a9bbeae3d7a8573ce2dfee36a306a91122b73627ada6a3f3ef6ac5004210", "format": 1 }, { "name": "tests/integration/targets/elb_instance/tasks/cleanup_vpc.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7863ce99f4e345a5f70daa74cbd23a61773566608179eac0c980443ab2043846", + "chksum_sha256": "9c344918188ee98c9cef56dd89c18a3411fd15c04394539726af5069c2716ed5", "format": 1 }, { "name": "tests/integration/targets/elb_instance/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "01ad4c93b6036d4c021eccaae757778a6c0ae7719676558dcc4747e73d59a59f", + "chksum_sha256": "eca19e252f67e0b7d29e151c90a6612888567c3a8335e0a5f6cfff8f47efb93b", "format": 1 }, { "name": "tests/integration/targets/elb_instance/tasks/manage_asgs.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3892ab7c1d852b66306d71ed5fd6e7e2e8242ba309087bcb83c0fc6bcd76dee7", + "chksum_sha256": "a23171e68941eea22224e160edfed116936a7e1822e50030b1cca3d35b392871", "format": 1 }, { @@ -4946,14 +5177,14 @@ "name": "tests/integration/targets/elb_instance/tasks/setup_instances.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d4464aa09bac5cb90830889381cea1f4d82352ec28d97c6dc241d48838ef7fc4", + "chksum_sha256": "ce1a93aeadcb4c48ba22354d6284a933b93e6ead6230a38fca94e462a929679f", "format": 1 }, { "name": "tests/integration/targets/elb_instance/tasks/setup_vpc.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "28e80e8a1b51a99405c190083ac25872c15fb87359877b3fca9f5f9b7de40b3e", + "chksum_sha256": "c7b33704d4015d41ae521c7b3833453376b4e270ee3b92b088acd41ee77a3a12", "format": 1 }, { @@ -5030,7 +5261,7 @@ "name": "tests/integration/targets/elb_network_lb/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"71756275db9e6db7ba16600fd050489261b8e298f6882a4d43c0645ae58b7b3a", + "chksum_sha256": "6102f98e978e90ac3e528d10af42b0fb2cb60133473fe7a12adb9d6d4df86d32", "format": 1 }, { @@ -5072,14 +5303,14 @@ "name": "tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "00b7cbe5b33ab9501bce47c2de5f41fc079e2b8f15d63d2314a1656775026844", + "chksum_sha256": "c2b12faeade2c2b44e5f185f0695b353c23321790fdf8268d94a8f58dac3dcce", "format": 1 }, { "name": "tests/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f0022885dd65c8abe35f1d5f8d16aabaa0010e802801193db0ca0533c560649a", + "chksum_sha256": "a2fc0323c04333db2084fe91a1bcf8d0c96b7daf8c84c15c22d20bbd84f5df6c", "format": 1 }, { @@ -5121,7 +5352,7 @@ "name": "tests/integration/targets/elb_target/files/ansible_lambda_target.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "10263eec202a91c1c9dbfefe781da519abedf417b08f45c12cc087e2b87b499f", + "chksum_sha256": "04b46c135ce7052cf1ea685bab2ac2e01214059144718f06bcf014fa783c56ac", "format": 1 }, { @@ -5156,14 +5387,14 @@ "name": "tests/integration/targets/elb_target/tasks/alb_target.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7edf690ca74786ce2fbd7196241b4f520824ba6b5d2a599020a7b38e987aab7a", + "chksum_sha256": "533b3914aece3fef59642804eebf8022a9313afa95f9c76fa7d3a690befd4175", "format": 1 }, { "name": "tests/integration/targets/elb_target/tasks/ec2_target.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "72955f4944464ba33f66da28b4a77fcfb1c4210cc9c0e0a37e2647b39224764a", + "chksum_sha256": "73c51ac261b2d4409810771fafa5b2b0eff05c6e1165886d677f3545db753aeb", "format": 1 }, { @@ -5184,7 +5415,7 @@ "name": "tests/integration/targets/elb_target/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "76af6ebb3dbebac09e1aa99fdfff90253d7fac9f00c41accd39b0f79ee504069", + "chksum_sha256": "f9cea12c70ea54ca1552137254d1f1d584bef46267e7d077043a55081634ae25", "format": 1 }, { @@ -5240,7 +5471,7 @@ "name": "tests/integration/targets/elb_target_info/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "959fe482146c396f9a5fd0e870d028bcec9d810b355c96ec987523b1f7624df3", + "chksum_sha256": "2436f263a22b7fd7fc7c8d2275b59cb7da9aa33a67c116e121db9f13f8a57373", "format": 1 }, { @@ -5282,21 +5513,21 @@ "name": "tests/integration/targets/glue_connection/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4b3d8eae5617e123c403013215e25e140b5c5b6dbe8acc22f1dc6766180e3431", + "chksum_sha256": "af12da62144c9fff643ca8078424341c20fe82d268a810366ac77bd660bea374", "format": 1 }, { "name": "tests/integration/targets/glue_connection/tasks/test_connection_jdbc.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f7017a59cf2a7eff8b666f2976c9617e77c3e5948d1cb5c8851288d13c5f6f77", + "chksum_sha256": "a6ca87586892f6fafdca924670feee3fd3bff6058f7d21afcd21bba39602bada", "format": 1 }, { "name": "tests/integration/targets/glue_connection/tasks/test_connection_network.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0b60289a10890047041107242e0eb7757742650ad4205784a7244e43119ec379", + "chksum_sha256": "017e8427360e87a5e4ae292feadf713406732cf3a82d62b184b674013dd0c226", "format": 1 }, { @@ -5352,14 +5583,14 @@ "name": "tests/integration/targets/glue_crawler/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"dcb66c43d91e12b6408ca21c9494307c89a358acb75f174c0c424174214d5f50", + "chksum_sha256": "75af09096eb14843c8637cc7b8c92d76444cc553cb56507e1e993503638359b8", "format": 1 }, { "name": "tests/integration/targets/glue_crawler/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "5f308323f63a3796fb5c025859d3a05e574ec8751abe10625b44875c32b2fb47", "format": 1 }, { @@ -5408,7 +5639,7 @@ "name": "tests/integration/targets/glue_job/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "219b03f1827959550bc291b524eeb3755062239e14846766c771a7ea7a41ad33", + "chksum_sha256": "3a16879d1bdea7ac04a46c190b7c4e8b7ef5b3d5180248f9edbb3f611b967bda", "format": 1 }, { @@ -5419,766 +5650,794 @@ "format": 1 }, { - "name": "tests/integration/targets/iam_access_key", + "name": "tests/integration/targets/iam_saml_federation", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_access_key/defaults", + "name": "tests/integration/targets/iam_saml_federation/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_access_key/defaults/main.yml", + "name": "tests/integration/targets/iam_saml_federation/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "99213b23eae590103e3c4f8ee5aaa8429b55940f3c0b7a6de7dcb3a166e987a2", + "chksum_sha256": "6a07f3ca3a595c6a2919329c01b4e6b89cfba9ca4f83ef7f4410f629fa2ed48d", "format": 1 }, { - "name": "tests/integration/targets/iam_access_key/meta", + "name": "tests/integration/targets/iam_saml_federation/files", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_access_key/meta/main.yml", + "name": "tests/integration/targets/iam_saml_federation/files/example1.xml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f8ba8ea297b04e2716e80fa638ac17896608394165d3d74b553ee95a6e2f8a50", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_saml_federation/files/example2.xml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3bd82bdc35dff86ad1fac766a10557de55de5bc5b5c1118965f791f9b76fb065", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_saml_federation/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_saml_federation/meta/main.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { - "name": "tests/integration/targets/iam_access_key/tasks", + "name": "tests/integration/targets/iam_saml_federation/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_access_key/tasks/main.yml", + "name": "tests/integration/targets/iam_saml_federation/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d82e33425e32743539216716f3f6a12a3f6482b8a986cde0dde26bd8a5d7be3f", + "chksum_sha256": "7f07265800e47c83e9210e0e2cb34c25958344a09f954ef10d2d10041d6d3bb6", "format": 1 }, { - "name": "tests/integration/targets/iam_access_key/aliases", + "name": "tests/integration/targets/iam_saml_federation/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "de04f53b045a9a81f3292eb82ffb295c2f0fe852269cb835b39ee0b2f94036e2", + 
"chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/iam_group", + "name": "tests/integration/targets/iam_server_certificate", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_group/defaults", + "name": "tests/integration/targets/iam_server_certificate/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_group/defaults/main.yml", + "name": "tests/integration/targets/iam_server_certificate/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3e088e1e0f22d3326400c757e02d0cd269e0f80663893ca5f479dfcc5ce75bc9", + "chksum_sha256": "5a80455c98b3da97451eec41c4911c1267377f610680daa969aeb6f8488a069f", "format": 1 }, { - "name": "tests/integration/targets/iam_group/meta", + "name": "tests/integration/targets/iam_server_certificate/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_group/meta/main.yml", + "name": "tests/integration/targets/iam_server_certificate/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", "format": 1 }, { - "name": "tests/integration/targets/iam_group/tasks", + "name": "tests/integration/targets/iam_server_certificate/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_group/tasks/main.yml", + "name": "tests/integration/targets/iam_server_certificate/tasks/generate-certs.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ac0c912c7bb892a7a8d5f17dc33de657d481210b10e7820b503a5373499dcf94", + "chksum_sha256": "0d73d7b651da3aa9d2609198f0c409d73d883486a296e4ff03236a70b93b0dbf", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_server_certificate/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cbde69a30d446d0b54a0f73482a019035549d19ce9ddb0f46a9e5a636b067606", "format": 1 }, { - "name": "tests/integration/targets/iam_group/aliases", + "name": "tests/integration/targets/iam_server_certificate/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "62ba9bb260bd69bc568279317f0af0d68080353adbc971f67b4ad3005634590e", + "chksum_sha256": "f7d1dbebc857ca0831944c1cb727aa57a0c85e7bdf4aea3bc25f690219dcc430", "format": 1 }, { - "name": "tests/integration/targets/iam_managed_policy", + "name": "tests/integration/targets/inspector_target", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_managed_policy/defaults", + "name": "tests/integration/targets/inspector_target/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_managed_policy/defaults/main.yml", + "name": "tests/integration/targets/inspector_target/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "be9824257f10b034325d892bb2df9e94c8825853c3f6fa8464a628b3523a53e6", + "chksum_sha256": "df451e67bd5645db1e553585459f2cc2a0197d4b804575e269669ea818a78e3a", "format": 1 }, { - "name": "tests/integration/targets/iam_managed_policy/meta", + "name": "tests/integration/targets/inspector_target/meta", "ftype": "dir", 
"chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_managed_policy/meta/main.yml", + "name": "tests/integration/targets/inspector_target/meta/main.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { - "name": "tests/integration/targets/iam_managed_policy/tasks", + "name": "tests/integration/targets/inspector_target/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_managed_policy/tasks/main.yml", + "name": "tests/integration/targets/inspector_target/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5e31080a6e936679c85218b356a77a4a8085db5b349cf72d9b2bbd66edc33d5d", + "chksum_sha256": "59e3367883093bd580b3ce83affeb6a449dbfad24c798bf7855fcd3f2dcb729f", "format": 1 }, { - "name": "tests/integration/targets/iam_managed_policy/aliases", + "name": "tests/integration/targets/inspector_target/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "51fe5871cae0634e784a3f226472c196a014820f9d1d62921b4f1c2ded249776", + "chksum_sha256": "34e5bb66d1cc63ebcae3934911776ed49e9f97d2016bd28c2029f6ccea543743", "format": 1 }, { - "name": "tests/integration/targets/iam_password_policy", + "name": "tests/integration/targets/inventory_aws_mq", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_password_policy/meta", + "name": "tests/integration/targets/inventory_aws_mq/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_password_policy/meta/main.yml", + "name": "tests/integration/targets/inventory_aws_mq/meta/main.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { - "name": "tests/integration/targets/iam_password_policy/tasks", + "name": "tests/integration/targets/inventory_aws_mq/playbooks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_password_policy/tasks/main.yaml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d5cb01e8acdb276cd9dfd70b6c5f8b4c0c1abeb55c5005921df9ed8c4d310b26", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_password_policy/aliases", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/tasks/find_broker.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a105d3cbc44ce95af2909fcd91224a9ed31faec15ddcd14db212cbe0098f2b75", - "format": 1 - }, - { - "name": "tests/integration/targets/iam_role", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "130e20cade65bbc57ba18d16772d846adccf6cee5ab9f6a38e02763f3965f619", "format": 1 }, { - "name": "tests/integration/targets/iam_role/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/inventory_aws_mq/playbooks/tasks/mq_instance_create.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34966187803d181c469e08c7a4dcafbf58616ddbbd8db0ef989c9e1a9564e39c", "format": 1 }, { - "name": "tests/integration/targets/iam_role/defaults/main.yml", + "name": 
"tests/integration/targets/inventory_aws_mq/playbooks/tasks/mq_instance_delete.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1982a4ea0d640a55c9aef81bb7b7e65bfa1755db7b7b0bb972726556c3c07c88", + "chksum_sha256": "204f89811b053b6e564d031a048c1b7487b939720e452c5437d3f100ce704fc3", "format": 1 }, { - "name": "tests/integration/targets/iam_role/files", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/vars", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_role/files/deny-all-a.json", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/vars/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a7aec67c944af4a9861e41e4bd0df9cc39a380e44ebfab585d0e5a4a0770a18b", + "chksum_sha256": "6fec9f37dd6526f4e91b3b87f506bc9affb24ae8d7a8b6eff1c90dbaca9dcc8c", "format": 1 }, { - "name": "tests/integration/targets/iam_role/files/deny-all-b.json", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/create_inventory_config.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d97b3f666d3daa474b035db329d8d39b4bff4ff6059ea42ebede391226681bb6", + "chksum_sha256": "3776cdbd09671c982db46a9cc70479f001a18fa42a0438b055acb4bb133dc8e9", "format": 1 }, { - "name": "tests/integration/targets/iam_role/files/deny-all.json", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/empty_inventory_config.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d55efc743254858a5f881bf83db9d77024bee4519607261ad81417883ec3865b", + "chksum_sha256": "e5ed1deef488792c766710357155adc4c93554a3aee60229879547c957336b8e", "format": 1 }, { - "name": "tests/integration/targets/iam_role/files/deny-assume.json", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/populate_cache.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "252bc63ef45bb6343320a9afacb88299ac8badf8b2cfbb2ecfd0a443dc28fa2f", + "chksum_sha256": "6f11b4980369826c6213479c20067aea4d155e9e77521853e6fc8d6855f0c163", "format": 1 }, { - "name": "tests/integration/targets/iam_role/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iam_role/meta/main.yml", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/setup_instance.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "2a6f5fc609c3da574711edb90d343b177dc4fa0c5719b879eb558f6b33c8b235", "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/inventory_aws_mq/playbooks/test_invalid_aws_mq_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6b405ef5a1076c851e42127eff82061a140c518bde73d0e30a8ed016e279861", "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks/boundary_policy.yml", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_cache.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "aea923db757c0adc020df34045dcec9e18525a54d6b5f754bd63561670b1bc87", + "chksum_sha256": "e869abd699257c933eca478ad5fa463987a55872646ab43d97c86dcf001e3e7c", "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks/complex_role_creation.yml", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_no_hosts.yml", 
"ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7e63f22d7ab7421252e22b08c866a7806a068f1b44ea104eb496b53b90a20803", + "chksum_sha256": "99e6674b7c3a9f5ed8f2b32ef4a234f4976690f1f2ece48dc45efb90dd8eb934", "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks/creation_deletion.yml", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_with_hostvars_prefix_suffix.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7688d37cd53363ac698cd415f40f9524f5a3c2216f905c0cedf0847b9f7e6652", + "chksum_sha256": "450af18658de10dae7d079614fea912bde84be21f869016df9ca79e28880a018", "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks/description_update.yml", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cc6bd8aeaa144cdc6aabb723d463e891b7e741b1acc5546e0087f891d4cd9be4", + "chksum_sha256": "19f2d7d72754eeb8f4d17fa6122cf19302d9d6c05c1eab276a55193e4177c4a1", "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks/inline_policy_update.yml", + "name": "tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory_with_constructed.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cd98b995e62c3592ee32165ba9937ffb0c7db943d179f8a81d85237daaaed546", + "chksum_sha256": "2dc4c41ebac8d853338f52bb869cb7b11ebe81e9e2ff0921ebba241388594564", "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "46745078e247345abb75ffe8ee2f448286916d86b0a492498542875cf11c9557", + "name": "tests/integration/targets/inventory_aws_mq/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks/max_session_update.yml", + "name": "tests/integration/targets/inventory_aws_mq/templates/inventory.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ccd7f9976ed55a32780181039bd857dfc9799b89e5c18b4d3a564fa55fd125e2", + "chksum_sha256": "605948bfa1c2df50c72b727154c525cfcfd048404b8dedab5cdab7b6ae9f3844", "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks/parameter_checks.yml", + "name": "tests/integration/targets/inventory_aws_mq/templates/inventory_with_cache.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "56fed457d2502b5f295c36de6217ed085745bd0b5a4af177e5eae2cef6e96801", + "chksum_sha256": "e6c14cc965385e9780e55398b73557720496a8f7248a2c4bfb637c36eb2bbefa", "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks/policy_update.yml", + "name": "tests/integration/targets/inventory_aws_mq/templates/inventory_with_constructed.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "43164d5214899ca9ff30794c037fd76602b2825659deb7a95cc1b316c95f4193", + "chksum_sha256": "c6a2ddaab5a6f5235c1c6532eba49152221885eaff65704d45256095a18a892c", "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks/role_removal.yml", + "name": "tests/integration/targets/inventory_aws_mq/templates/inventory_with_hostvars_prefix_suffix.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0c83916a080b19600cc6705cb502584e8bcf555a3c6c0252e18a8eabc0617c1e", + "chksum_sha256": "23ae87d2d8f8657b81fc876522e817dc141d0a12a82c16c4962e02b8f8e235e7", "format": 1 }, { - "name": "tests/integration/targets/iam_role/tasks/tags_update.yml", + "name": 
"tests/integration/targets/inventory_aws_mq/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5f9bdd35dac1f840e2d4155dad4b59e7873639e6c23536d87293ee70f6af945f", + "chksum_sha256": "145ea3960d3bb31522c85234d7aa3a034de27220b6b9a7cb71de615a26408dc5", "format": 1 }, { - "name": "tests/integration/targets/iam_role/aliases", + "name": "tests/integration/targets/inventory_aws_mq/runme.sh", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "97a5ed23d5e2692fa5bcf34e35534a92ae914467b71a3f846b6459d4f28cddf4", + "chksum_sha256": "15f08fc1eb97f83e018413408f4eccd5f63d720c1cb3133cf4b0ed70d1a2514c", "format": 1 }, { - "name": "tests/integration/targets/iam_saml_federation", + "name": "tests/integration/targets/kinesis_stream", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_saml_federation/defaults", + "name": "tests/integration/targets/kinesis_stream/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_saml_federation/defaults/main.yml", + "name": "tests/integration/targets/kinesis_stream/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6a07f3ca3a595c6a2919329c01b4e6b89cfba9ca4f83ef7f4410f629fa2ed48d", + "chksum_sha256": "af176362a6d7494b15c3cb413f4a455d85e9d5ac42ef519f66b496c042e34e2a", "format": 1 }, { - "name": "tests/integration/targets/iam_saml_federation/files", + "name": "tests/integration/targets/kinesis_stream/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_saml_federation/files/example1.xml", + "name": "tests/integration/targets/kinesis_stream/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f8ba8ea297b04e2716e80fa638ac17896608394165d3d74b553ee95a6e2f8a50", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { - "name": "tests/integration/targets/iam_saml_federation/files/example2.xml", + "name": "tests/integration/targets/kinesis_stream/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kinesis_stream/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3bd82bdc35dff86ad1fac766a10557de55de5bc5b5c1118965f791f9b76fb065", + "chksum_sha256": "c8f0a40573fd9bc37570a0439aebb357d2a745c8f25a89522f16d975e40630f9", "format": 1 }, { - "name": "tests/integration/targets/iam_saml_federation/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/kinesis_stream/tasks/test_encryption.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b6f0dd4b2ceb7e4689b4b6ad84c29b6ea6d323f5a1e4e217085ae057b477d5", "format": 1 }, { - "name": "tests/integration/targets/iam_saml_federation/meta/main.yml", + "name": "tests/integration/targets/kinesis_stream/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "d691899dadb2cbc37b7f0b9234ff8728c38fadedfda3dbcfcbcdda9c6142436e", "format": 1 }, { - "name": "tests/integration/targets/iam_saml_federation/tasks", + "name": "tests/integration/targets/legacy_missing_tests", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_saml_federation/tasks/main.yml", + "name": 
"tests/integration/targets/legacy_missing_tests/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/legacy_missing_tests/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "10c9a45641b9b1b275ea36b025ba4350fb058a9962cbe02604810f3aceb9dd28", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { - "name": "tests/integration/targets/iam_saml_federation/aliases", + "name": "tests/integration/targets/legacy_missing_tests/README.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "6b5def9406496656df49a49402ce869edc3db0ed88c2e2796e1a3bb6d486cb38", "format": 1 }, { - "name": "tests/integration/targets/iam_server_certificate", + "name": "tests/integration/targets/legacy_missing_tests/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a9d7997aa08e6b143d74e6dcd8cf9b2bbf78bd89b225c181478f9c5d76a13d02", + "format": 1 + }, + { + "name": "tests/integration/targets/lightsail", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_server_certificate/defaults", + "name": "tests/integration/targets/lightsail/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_server_certificate/defaults/main.yml", + "name": "tests/integration/targets/lightsail/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5a80455c98b3da97451eec41c4911c1267377f610680daa969aeb6f8488a069f", + "chksum_sha256": "412ce5355682725393405de038e10eaeaf708b843a10aa1a77f6b9b59ce5a332", "format": 1 }, { - "name": "tests/integration/targets/iam_server_certificate/meta", + "name": "tests/integration/targets/lightsail/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_server_certificate/meta/main.yml", + "name": "tests/integration/targets/lightsail/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { - "name": "tests/integration/targets/iam_server_certificate/tasks", + "name": "tests/integration/targets/lightsail/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_server_certificate/tasks/generate-certs.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0d73d7b651da3aa9d2609198f0c409d73d883486a296e4ff03236a70b93b0dbf", - "format": 1 - }, - { - "name": "tests/integration/targets/iam_server_certificate/tasks/main.yml", + "name": "tests/integration/targets/lightsail/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "98a7106748593f81b6c138395afe4e7b7ed97b427ee0028b9d8314b8bafa7b29", + "chksum_sha256": "b9868e14ab6d1fb24841a6f2d2f5a7e4a290ec0e52a6bb05ea327b0b1f41793a", "format": 1 }, { - "name": "tests/integration/targets/iam_server_certificate/aliases", + "name": "tests/integration/targets/lightsail/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f7d1dbebc857ca0831944c1cb727aa57a0c85e7bdf4aea3bc25f690219dcc430", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", 
"format": 1 }, { - "name": "tests/integration/targets/inspector_target", + "name": "tests/integration/targets/lightsail_snapshot", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inspector_target/defaults", + "name": "tests/integration/targets/lightsail_snapshot/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inspector_target/defaults/main.yml", + "name": "tests/integration/targets/lightsail_snapshot/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "df451e67bd5645db1e553585459f2cc2a0197d4b804575e269669ea818a78e3a", + "chksum_sha256": "702f2ba8c0d93c651ad246ba5f791f549f5f7f41ef141ce9b72185cd14df75a2", "format": 1 }, { - "name": "tests/integration/targets/inspector_target/meta", + "name": "tests/integration/targets/lightsail_snapshot/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inspector_target/meta/main.yml", + "name": "tests/integration/targets/lightsail_snapshot/meta/main.yml", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { - "name": "tests/integration/targets/inspector_target/tasks", + "name": "tests/integration/targets/lightsail_snapshot/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inspector_target/tasks/main.yml", + "name": "tests/integration/targets/lightsail_snapshot/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "59d66364df7f01a095e473081885ef143aad97f3edf43ab6d21029a19fab2ea7", + "chksum_sha256": "f8d88a6fb793610b17d30ae07ae901844ef86a59c4fc4afe4eb53278241c7343", "format": 1 }, { - "name": "tests/integration/targets/inspector_target/aliases", + "name": "tests/integration/targets/lightsail_snapshot/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "34e5bb66d1cc63ebcae3934911776ed49e9f97d2016bd28c2029f6ccea543743", - "format": 1 - }, - { - "name": "tests/integration/targets/kinesis_stream", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/kinesis_stream/defaults", + "name": "tests/integration/targets/lightsail_static_ip", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/kinesis_stream/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "af176362a6d7494b15c3cb413f4a455d85e9d5ac42ef519f66b496c042e34e2a", - "format": 1 - }, - { - "name": "tests/integration/targets/kinesis_stream/meta", + "name": "tests/integration/targets/lightsail_static_ip/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/kinesis_stream/meta/main.yml", + "name": "tests/integration/targets/lightsail_static_ip/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "1fda63eccc1ffd3f1b83fe4538498c152a594647be1911bf550b0afd1cf73e56", "format": 1 }, { - "name": "tests/integration/targets/kinesis_stream/tasks", + "name": "tests/integration/targets/lightsail_static_ip/tasks", "ftype": "dir", "chksum_type": null, 
"chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/kinesis_stream/tasks/main.yml", + "name": "tests/integration/targets/lightsail_static_ip/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e4a11066a45915cb86de6f86349a5e75eb55ea4965625f2839c40a1bb08b7164", + "chksum_sha256": "e4ab3a46dc1c5192a960d3f20e69aa274e3cf19ef76ee928abf02b6186f6b0f1", "format": 1 }, { - "name": "tests/integration/targets/kinesis_stream/tasks/test_encryption.yml", + "name": "tests/integration/targets/lightsail_static_ip/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "88b6f0dd4b2ceb7e4689b4b6ad84c29b6ea6d323f5a1e4e217085ae057b477d5", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/kinesis_stream/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d691899dadb2cbc37b7f0b9234ff8728c38fadedfda3dbcfcbcdda9c6142436e", + "name": "tests/integration/targets/mq", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/legacy_missing_tests", + "name": "tests/integration/targets/mq/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/legacy_missing_tests/meta", + "name": "tests/integration/targets/mq/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23afa4e31a7b706d88d6511814b6475d1a284bfaf89f674f011b9e24682f0ffc", + "format": 1 + }, + { + "name": "tests/integration/targets/mq/files", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/legacy_missing_tests/meta/main.yml", + "name": "tests/integration/targets/mq/files/broker_cfg.1.xml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "7ab097f5f6e9f5139ce2dfed5015aafa235c3b1b2222fc93c3b6c6a8cbb7b9e8", "format": 1 }, { - "name": "tests/integration/targets/legacy_missing_tests/README.md", + "name": "tests/integration/targets/mq/files/broker_cfg.1a.xml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6b5def9406496656df49a49402ce869edc3db0ed88c2e2796e1a3bb6d486cb38", + "chksum_sha256": "45ec2a4d3a6963916da6215971ef65cc99143779e8cb5cf3c73fa3d9eed5f9bd", "format": 1 }, { - "name": "tests/integration/targets/legacy_missing_tests/aliases", + "name": "tests/integration/targets/mq/files/broker_cfg.2.xml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ac8293442cce4eca766c53485a5012b1bdb5845f80c06a1cbcb1597323cf3ab0", + "chksum_sha256": "504e0e18bf825921be2c0e599c061dddda5b215aee9b0afdafc1f2b00c139626", "format": 1 }, { - "name": "tests/integration/targets/lightsail", + "name": "tests/integration/targets/mq/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lightsail/defaults", + "name": "tests/integration/targets/mq/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/mq/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lightsail/defaults/main.yml", + "name": "tests/integration/targets/mq/tasks/broker_cleanup.yml", "ftype": "file", "chksum_type": 
"sha256", - "chksum_sha256": "412ce5355682725393405de038e10eaeaf708b843a10aa1a77f6b9b59ce5a332", + "chksum_sha256": "7849533decca144b6aea20936fd1bf99896f0a075a7cdd63d1bf6d09374848ec", "format": 1 }, { - "name": "tests/integration/targets/lightsail/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/mq/tasks/broker_config_tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5cd6b42f31159d72000b3452e8c0adf68f6701924356f006f97ebaf253f9c36", "format": 1 }, { - "name": "tests/integration/targets/lightsail/meta/main.yml", + "name": "tests/integration/targets/mq/tasks/broker_delete_tests.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "1aff980ae48a08a717e88e9da9be8325c95f6f228e044cdf0744257505421b63", "format": 1 }, { - "name": "tests/integration/targets/lightsail/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/mq/tasks/broker_tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0c7a612de2e8f4541a3aa1710798cf9dc72244fa7e8ad6f4c34d7030160dd3a", "format": 1 }, { - "name": "tests/integration/targets/lightsail/tasks/main.yml", + "name": "tests/integration/targets/mq/tasks/broker_user_info_tests.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6b3334073c295665f4eed394c039c45c274bddfea692b7b4bc2c9496f97080b9", + "chksum_sha256": "70044dc25ce55f9181017231b40495182bfc19d5b85817ce930deac653083591", "format": 1 }, { - "name": "tests/integration/targets/lightsail/aliases", + "name": "tests/integration/targets/mq/tasks/broker_user_tests.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "361b8f4b9341c56df652066066ddeaf24aa79c75b34cec1471b5121d76406c8f", "format": 1 }, { - "name": "tests/integration/targets/lightsail_static_ip", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/mq/tasks/env_cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "625fd0423b76e3e78fc7f3c0359ce919f9d7d93f74f64bf92b94f9988351a0fe", "format": 1 }, { - "name": "tests/integration/targets/lightsail_static_ip/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/mq/tasks/env_setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd3497e98587a07515fd2fd6c30455c90d97dd5da384ed6acd0ecda124b82664", "format": 1 }, { - "name": "tests/integration/targets/lightsail_static_ip/defaults/main.yml", + "name": "tests/integration/targets/mq/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1fda63eccc1ffd3f1b83fe4538498c152a594647be1911bf550b0afd1cf73e56", + "chksum_sha256": "12550040834d414ea54a8939bda4292cf0b077ece00f0edd80ece06995dfbc24", "format": 1 }, { - "name": "tests/integration/targets/lightsail_static_ip/tasks", + "name": "tests/integration/targets/mq/vars", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lightsail_static_ip/tasks/main.yml", + "name": "tests/integration/targets/mq/vars/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5747d0da6dccc9cf15411438ce07223e3c412d14287944213d5c0c8750c822ec", + "chksum_sha256": 
"f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", "format": 1 }, { - "name": "tests/integration/targets/lightsail_static_ip/aliases", + "name": "tests/integration/targets/mq/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "0a258e27c31a3495929692dd2809edc2c43c434790ca756f841f19a57d20cd08", "format": 1 }, { @@ -6227,14 +6486,14 @@ "name": "tests/integration/targets/msk_cluster-auth/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a0230610bafd4916f640e41e03004852ae28d262909d7c018aa6306cf8615b05", + "chksum_sha256": "ecea6a84ede483a243de16e53c3fa7d08174b09eabae1a24c921c808465b16b6", "format": 1 }, { "name": "tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9c16cb829596c3b2a09b00ab1906409ede7b37ca30318477e86610415962033c", + "chksum_sha256": "1f87bc8634bb38d681db1598f3cf0560638b667a7b4ccf8cdc420703d1a895d2", "format": 1 }, { @@ -6290,28 +6549,28 @@ "name": "tests/integration/targets/msk_cluster/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a4032a16508df0c6c9ad877dd4b6abef591c11d37dc4e8a0fefe75a1cc9008a6", + "chksum_sha256": "1cc1783a8eab0c579d89dc0d4536deb84219b32d6a8956337b1e6ac60468dc1f", "format": 1 }, { "name": "tests/integration/targets/msk_cluster/tasks/test_create.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "999cb11aa1ca7044c3e10856c6141b2bbfcbd454acdc9593b6bfe4996ffff00a", + "chksum_sha256": "6065cb2d3e5d9b69c4393cc7598f9ca3acb9dd8b674086b090ee6ae58b777505", "format": 1 }, { "name": "tests/integration/targets/msk_cluster/tasks/test_delete.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a3dc9b8d60fd23db2dc9cca33b3be0169c2302fd2a39cbff4f838f21e3ff8561", + "chksum_sha256": "52f026ae8229bd30d731ba6d90f3d37c60b172d5614c9a79eaa7fe1cfb206460", "format": 1 }, { "name": "tests/integration/targets/msk_cluster/tasks/test_update.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2baca7e803d0b95bade9371668b8c3f39f5b1fd58e8af6e59b12874df2cb67bb", + "chksum_sha256": "aa6b80606d428610d10fae177b9387a58793eef3dd488b29070c1e35ec2df6c1", "format": 1 }, { @@ -6367,7 +6626,7 @@ "name": "tests/integration/targets/msk_config/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "32707ed8df23393a4aba3d346bc01a3ca0cb96e4ae2ffa32ad00eb946be7edc3", + "chksum_sha256": "3f2d8717cf6ec5a0be82042daff04341a84a9b17e0ee4bab36926437e4ce6728", "format": 1 }, { @@ -6437,7 +6696,7 @@ "name": "tests/integration/targets/networkfirewall/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b8cbd969a439ec671c632a75147634023d23c1f1af0da49e133b860f397609d6", + "chksum_sha256": "61c31c35650bee38558f91ff72dfe03abb4bc97f30f6ee6372e6a7d4baf48e72", "format": 1 }, { @@ -6493,7 +6752,7 @@ "name": "tests/integration/targets/networkfirewall_policy/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "43088f43a4d8ce8720c7af4dd83ae833d4f1de0a7eccefba52daec468797c4dc", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { @@ -6528,28 +6787,28 @@ "name": "tests/integration/targets/networkfirewall_policy/tasks/default_order.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "89cad87a3b5189c4f12ad91ad0f65b1ff3bbbdb807bcdcb82a979208aab9c3fe", + "chksum_sha256": 
"e3beb8ace412edc213ee95d80d93406e4fba4f2dc6c138db49dc631276e8486b", "format": 1 }, { "name": "tests/integration/targets/networkfirewall_policy/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0c5d7a955ead7a3fce020dbc75956d640a59c7f6a9ee2676e33607315306ef91", + "chksum_sha256": "768efc0bcf3a4c68df4a94121bcc1280b8e20bbd829284e74f10479954ad78dc", "format": 1 }, { "name": "tests/integration/targets/networkfirewall_policy/tasks/setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "277304c5175b3cd3a6a1fc08d0697a2d10e70314e37043d939278a0ae6570149", + "chksum_sha256": "599e0ef3d77995c82aa051d947a1ce7232c9e1829490f030fb988a11588af4f6", "format": 1 }, { "name": "tests/integration/targets/networkfirewall_policy/tasks/strict_order.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ab171d5fd73cde4f8f6d56a58474b088b47a112aefb51633ba511304e6ce90bc", + "chksum_sha256": "5b00d462ddfbf7e633ede3449b3e2347045102d0e0e183aa13ab08d79c1c518e", "format": 1 }, { @@ -6591,7 +6850,7 @@ "name": "tests/integration/targets/networkfirewall_rule_group/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "43088f43a4d8ce8720c7af4dd83ae833d4f1de0a7eccefba52daec468797c4dc", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { @@ -6626,7 +6885,7 @@ "name": "tests/integration/targets/networkfirewall_rule_group/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "86a3101c318f618ae832ccfcac5ca5a994f53aa4132d01935622ca96f99189b4", + "chksum_sha256": "87fbae761f586d4825ce8318835862caed45eb4e0948f96396766045daa3c5e5", "format": 1 }, { @@ -6654,14 +6913,14 @@ "name": "tests/integration/targets/networkfirewall_rule_group/tasks/stateful.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3677e8d078f43ddb3a0de49677bbcd0f218ae8dfcfeb5b787a9c62cf3f3afc0f", + "chksum_sha256": "90dee38138d48c688ed47aac6314878de5859707e0c8e1f18d4cf1491a262850", "format": 1 }, { "name": "tests/integration/targets/networkfirewall_rule_group/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "35a0c2d65024dae4341e373aa598a54a614ef858cf11c795ed83ea10adaf0238", + "chksum_sha256": "c1656176c76d62eac2c32ffebbeef63835ec7c1d49aa37a64ce00bb343594ea9", "format": 1 }, { @@ -6710,7 +6969,7 @@ "name": "tests/integration/targets/opensearch/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e415851f75e86d82f70418129acffd9a207e6640a0d7fc34d30fa88560bbb153", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { @@ -6724,28 +6983,28 @@ "name": "tests/integration/targets/opensearch/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "71e6a4750f3c03cd4847f6b0d55836dc2fcb63b8b6fd2da9f43e0c8c833d6099", + "chksum_sha256": "61fe1f21f3bc7660715b3ca1a40af263e2da797ae79b723723aff3996d8be321", "format": 1 }, { "name": "tests/integration/targets/opensearch/tasks/test_create_cert.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "aae6d7c07a94e14ae26295cc57194f53b546022df9e79e343f3ee3ad970950f4", + "chksum_sha256": "4902ef5fd11c259a4c1bff5159d2e6b890a7497979702d4ca3ccc3c796e51f62", "format": 1 }, { "name": "tests/integration/targets/opensearch/tasks/test_delete_resources.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "274556ec5aaabba73d362bff3565a6acfd6b14a603f8fa511e58846f3e8af080", + "chksum_sha256": 
"95c3d1dce9453b5cf1809d6b71b495c079d3a4a70ab16f56e3ffbf46b700e1e2", "format": 1 }, { "name": "tests/integration/targets/opensearch/tasks/test_delete_vpc_resources.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0ae272b8fa61fecd36a3b381c4cb05c48e81d6ccc517464a25ace1573332d816", + "chksum_sha256": "448095d236463c59f566fee92a3685abbadeaaf18c07a03ddb8767afc1dab370", "format": 1 }, { @@ -6759,7 +7018,7 @@ "name": "tests/integration/targets/opensearch/tasks/test_vpc_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0fa749aca00d80d1ec1986ca702270c0f9c8bb276c4fd44493124a825bb1d20f", + "chksum_sha256": "f9b2a4628ad4585bc49a9fa83eda6443b38917f28a93304bbaa7615b8055b50d", "format": 1 }, { @@ -6829,7 +7088,7 @@ "name": "tests/integration/targets/redshift/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1c223ce7894790cae476629878079b47a13c9dd18c6f3805a91feaf72957811a", + "chksum_sha256": "861e2a9f87f02cd39c99af7a9ed2ab0a283891e432677738a79c77aff6ca124f", "format": 1 }, { @@ -6885,7 +7144,7 @@ "name": "tests/integration/targets/redshift_subnet_group/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1e311cba6981a7094c303486231d73b4f227388d3449e23d743cbd39f75584cb", + "chksum_sha256": "0357fd5816f9a6363c5fd3557b2fcb9d6e75af20d443971b50a74b22640fe71f", "format": 1 }, { @@ -6896,70 +7155,28 @@ "format": 1 }, { - "name": "tests/integration/targets/s3_bucket_info", + "name": "tests/integration/targets/route53_wait", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket_info/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/s3_bucket_info/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "33402070a2915fd11eed690eae64db9b6f22f4557e808dff121ed456bebdd66d", - "format": 1 - }, - { - "name": "tests/integration/targets/s3_bucket_info/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/s3_bucket_info/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", - "format": 1 - }, - { - "name": "tests/integration/targets/s3_bucket_info/tasks", + "name": "tests/integration/targets/route53_wait/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket_info/tasks/basic.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2ee7b012473cfeaa0b7ddd2cfb6810facf41db62a9939c1b7ab9865caadec952", - "format": 1 - }, - { - "name": "tests/integration/targets/s3_bucket_info/tasks/bucket_ownership_controls.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f617729c54e82e71a5d983ca0e96750a1a4f0db1b465c9a1e7875badb055aa7d", - "format": 1 - }, - { - "name": "tests/integration/targets/s3_bucket_info/tasks/main.yml", + "name": "tests/integration/targets/route53_wait/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0befaabe1ba20aa95d63f544d87e369c10ffe2d7c9a03e3b644310982d5c61b5", + "chksum_sha256": "f8da2c6560642436a25efc07bea2a785ee03f107bf5b5642a4eece46dd9b91a4", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket_info/aliases", + "name": "tests/integration/targets/route53_wait/aliases", "ftype": "file", "chksum_type": "sha256", 
"chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", @@ -7004,7 +7221,7 @@ "name": "tests/integration/targets/s3_bucket_notification/files/mini_lambda.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5b6c842868fed1950e92f8d1824c098cecbbdd2d86cfa47aac01f16182af33b3", + "chksum_sha256": "671d4727fcf4c863c14fd3175714ac081cc990fa4c4e53d329fd36640945f473", "format": 1 }, { @@ -7032,7 +7249,7 @@ "name": "tests/integration/targets/s3_bucket_notification/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ff216849a0c08ade565ccc1e4a568dd252119c9b2c121d5ccd79608bcdd23ce2", + "chksum_sha256": "71876709f82e0937857ce1deb288d098217836f24fe92c0e267a766e06367411", "format": 1 }, { @@ -7088,7 +7305,7 @@ "name": "tests/integration/targets/s3_lifecycle/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4b50f8037d4988b787046cea58b8cf5f8fed70b81c6de886552d5fc8af2f8d30", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", "format": 1 }, { @@ -7102,14 +7319,14 @@ "name": "tests/integration/targets/s3_lifecycle/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "714b6d0f74ba602bfe6771187d386952d8343129e679b47393b91c9d1fd7c6fa", + "chksum_sha256": "72423890b54eaae16093a60debac1f8038eb7b9e2c2032c9672958d6736aa826", "format": 1 }, { "name": "tests/integration/targets/s3_lifecycle/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "4a15fa78cf44660cba4c272436a92a637c1625a7febd23da07f9ebbf9f6c9c30", "format": 1 }, { @@ -7158,7 +7375,7 @@ "name": "tests/integration/targets/s3_logging/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6cc04364e8ab882de972a10f892efc1f3fc520582e8bba8fab9949810b30153c", + "chksum_sha256": "076b0a36063f816bc24017b40f6ccba5fc48dc8914ebe4ad06a0b36bb8373780", "format": 1 }, { @@ -7214,14 +7431,14 @@ "name": "tests/integration/targets/s3_metrics_configuration/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b08f6993b03927d884628237ed15a3fa3e7b916360702e0b1367cc77470ca477", + "chksum_sha256": "89d19c53d47f6202592167fd0f12d5b9a7d1fd895bd0e57f5fcf17a7142bc910", "format": 1 }, { "name": "tests/integration/targets/s3_metrics_configuration/tasks/s3_metrics_info.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f961c7cc94b4f019821227b0b68a7e25d18f45a0558ecc7e709d1a4660bd560c", + "chksum_sha256": "9477908a7bca8d7f957ec883e6235dd9d562967de81491d0ced566a696b0307f", "format": 1 }, { @@ -7305,7 +7522,7 @@ "name": "tests/integration/targets/s3_sync/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c17f555cfa7fb2639a149b4b3e08e94a4a6a68b5998ce3f9ab2a807451083dd4", + "chksum_sha256": "06e5560596ffbbada8288f0014d82d90b0c51111b0c5341b6199ddefebd7f841", "format": 1 }, { @@ -7382,28 +7599,28 @@ "name": "tests/integration/targets/secretsmanager_secret/tasks/basic.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6d4548f6028cd71381f59f8ec815a160cc53300c56d77f7314c487b4a48d2259", + "chksum_sha256": "7f4ff9f7987b18b686263d997d7e0d811448daf51fa4c81b77b100b3d226dd00", "format": 1 }, { "name": "tests/integration/targets/secretsmanager_secret/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9b3456cfb720c2ca7ff84800db3292959fdf0da58fd287e166f8a0f35317ac5b", + "chksum_sha256": 
"56cb7a83210085d2419a7e93c47f33966110e07b33f67bd1f09fb4edf479910b", "format": 1 }, { "name": "tests/integration/targets/secretsmanager_secret/tasks/replication.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ee785f54c4e4da1ae179c6239948be24a0f847bcb17c0876ab96b5147bef2871", + "chksum_sha256": "6d683df89cb14081f2c14d18fc2c87e0b57c411f04e9ca1693789ff8e3a71abe", "format": 1 }, { "name": "tests/integration/targets/secretsmanager_secret/tasks/rotation.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3be0b1305907aa6486e80aa0883f991ed51f3cba6e1774284e23836599fcebe0", + "chksum_sha256": "01d2a81ec50be52c30ed22608cc917025834e709a3e43ded577e7676c34e5e57", "format": 1 }, { @@ -7424,7 +7641,7 @@ "name": "tests/integration/targets/secretsmanager_secret/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "f2acc4ab3ba6bf2b9cb8a3f28e7b5d4fb6a592d08fabe6b685a15a2b7b63095e", "format": 1 }, { @@ -7473,14 +7690,14 @@ "name": "tests/integration/targets/ses_identity/tasks/assert_defaults.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "03dcfcf5b99e5ffe1945e0e38aaab20b6604cc456b760801875fe070ff79042b", + "chksum_sha256": "a606c70c84e58947d1dc5922a284c37eda59cb7919995182ca63f9cb085e6fa2", "format": 1 }, { "name": "tests/integration/targets/ses_identity/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "93b1492cdcb8a8572cce9d07df4227bf22e90f325003215591329e51f1e4b221", + "chksum_sha256": "b5733e39813cc6c8ca99162d22143f0118fe22f518375d2f8260793f031e3a5b", "format": 1 }, { @@ -7536,7 +7753,7 @@ "name": "tests/integration/targets/ses_identity_policy/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "db37298600b429e677db25e1455dceb1f11b29a0b5fdb1f894449a56ec34f343", + "chksum_sha256": "623ebf3939139a5ff8e41e48e32068ed4bb078030affd81cbc45f7b7f7f06cf6", "format": 1 }, { @@ -7606,28 +7823,28 @@ "name": "tests/integration/targets/ses_rule_set/tasks/active-rule-set-tests.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e91a4532657a8afbf178548d91a0957ddb795245cc9be739395ba7a0375c0a29", + "chksum_sha256": "bf5e5f71623fe45f5350598341675684e3900cbde393c47bef674fe8c5b07a86", "format": 1 }, { "name": "tests/integration/targets/ses_rule_set/tasks/cleanup-lock.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8b863fce4c771fe11a9c455767c38756ea09178f41289eff6ea6914e3c45bd39", + "chksum_sha256": "0cbab74ce7c6a4e160178f241e2e450e9b151d781e3e54f60bd6649575558e75", "format": 1 }, { "name": "tests/integration/targets/ses_rule_set/tasks/inactive-rule-set-tests.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6b5cac1e3eb4ec04f71871404dc1d365388cbb6b7e797832ab6fde48d607e8e2", + "chksum_sha256": "ece5ab84368bc2e61850b0afa3df96a68594e771b6192ed40026def5b4915227", "format": 1 }, { "name": "tests/integration/targets/ses_rule_set/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c5877efea4d17f0f90623dbd47d93887c1da81dfa4b394e39dcb2a68447cb7ff", + "chksum_sha256": "9c31992c53e3208b7792b3fd03c4a2e3f4bd86f98560622eaa469b3539ca74bf", "format": 1 }, { @@ -7669,7 +7886,7 @@ "name": "tests/integration/targets/setup_botocore_pip/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "982778216860d979fd936609ed62defea823593c1607059c898dc75e08d7498e", + "chksum_sha256": 
"d895b8c4daee4f5a0a8e38789b1916b13193855a395c7d601f1b27bf30bef11e", "format": 1 }, { @@ -7739,7 +7956,7 @@ "name": "tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "22db11d66aa0d3996727b793db5152e2277188feb1814f1046289249341dcaaa", + "chksum_sha256": "3c7a991fa08be350bcf60746b7c3e5db931090b82cb4f32488da548d74641fa5", "format": 1 }, { @@ -7774,14 +7991,14 @@ "name": "tests/integration/targets/setup_connection_aws_ssm/tasks/cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "68f1fb1a192947f04812322a2540d6b167662e599e0226ac6d41bd574976c49a", + "chksum_sha256": "6bf05a9289910a43c5d6adfd078beb2d30e232721147226d9b3c0ae6d16fe92a", "format": 1 }, { "name": "tests/integration/targets/setup_connection_aws_ssm/tasks/connection_args.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3024b12fcc79bdaf6bc5b153492ba13a895f22ec29a27dcb6f5ecc60b6e8589b", + "chksum_sha256": "5d5f6ae82bb735bb77cee3b614e98c1becc1455c5783b0603b8147cbf6485513", "format": 1 }, { @@ -7802,14 +8019,14 @@ "name": "tests/integration/targets/setup_connection_aws_ssm/tasks/encryption.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ca7c1eff754826a0bb0dc6dc411681c650f0d5144f038514c4052fda0dd60fb0", + "chksum_sha256": "bf5f7e849621f7a2902b7e69070d797c4af79c2fd179cc7165323e56c06a6a70", "format": 1 }, { "name": "tests/integration/targets/setup_connection_aws_ssm/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c67410eea59d53200e8f15f6417b100dc8a65a3e67c348f16a40feefe0f0d3ea", + "chksum_sha256": "9d1a04bf9a501b2ed63be47f389981c012043e4fe736e107c812c709fe1e9c2e", "format": 1 }, { @@ -7921,7 +8138,7 @@ "name": "tests/integration/targets/setup_ec2_facts/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c08635426696cb1d74d61cf8101bb4f912c66b5b4e17d78cdbdc45f73fcb0b91", + "chksum_sha256": "ce17a592baaac4abb366317269ef9c96ef83d3820126ff3e768e58e81d1ec3aa", "format": 1 }, { @@ -7935,7 +8152,7 @@ "name": "tests/integration/targets/setup_ec2_facts/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { @@ -7949,7 +8166,7 @@ "name": "tests/integration/targets/setup_ec2_facts/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7d03d30a328d5758d05cec67692df82d906021b2d9823c6a67e8c3f51cd057d1", + "chksum_sha256": "b96ad83df89f7d55f3f7c7d2ba558a9d1277420e4e9be8c577a07b9733a8cf15", "format": 1 }, { @@ -8047,7 +8264,7 @@ "name": "tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fc12adf84deacbe3a14c4da31f5b7bbfcf57e4dc2cd7e4693e5a991a6efbaf3b", + "chksum_sha256": "6eef2cb8a0cdc57026118069bf9899304e544e2f9a0b36864f9d5420cd010055", "format": 1 }, { @@ -8124,7 +8341,7 @@ "name": "tests/integration/targets/sns/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bcdbf0fb7a0f8da36e9d01bb163cb3127a6843fe44117f1a7073ca41b28d5766", + "chksum_sha256": "645340198ebd7969210eae8b7e09036f4ad061c900ac415955b11f30e4f01559", "format": 1 }, { @@ -8173,7 +8390,7 @@ "name": "tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"26dc52059ed47d477d85a8c8fb6ae3e3dfec306d05e2ff29d62dcad98d2fb665", + "chksum_sha256": "8f8328a6a87a4aa960fd6473bb44380943474b40012f24df9f31458a1b811315", "format": 1 }, { @@ -8208,7 +8425,7 @@ "name": "tests/integration/targets/sns_topic/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bfe7c8f2d0853490e5968ff41eea72fe9a4cacff8c5d04013180800a3c444a85", + "chksum_sha256": "00bff28386d44b4c5681c9ab8852075291b3999c2bd8490798605ad84d8d4154", "format": 1 }, { @@ -8285,7 +8502,7 @@ "name": "tests/integration/targets/sqs_queue/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fc5ddbcf2f3769f063e416edbb723fd2c3dae40e7352948ac142ee709b85a3df", + "chksum_sha256": "bd73c6b8ceaa04b0e0e4c2f0f7a050972c8b7c00899523f4a156be93f37bdf27", "format": 1 }, { @@ -8341,7 +8558,7 @@ "name": "tests/integration/targets/ssm_parameter/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ba7ec7d2d63618f7542cb8d9a7c7fdd28dc2524e0a1f703542ab5e9269c6d03c", + "chksum_sha256": "be7c36734a74e07b0be2a976cd73be59fae415504c60153deba28dc8c865e0f2", "format": 1 }, { @@ -8425,7 +8642,7 @@ "name": "tests/integration/targets/stepfunctions_state_machine/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b24edc9fa457fd5cb5439cd92c14a1d5dd5cf52ba497522f9067e187735a78a3", + "chksum_sha256": "3c54bb8bbddeff16ad77febc3d67ba6d370f1a177e1671296acef81607a08048", "format": 1 }, { @@ -8436,76 +8653,6 @@ "format": 1 }, { - "name": "tests/integration/targets/sts_assume_role", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sts_assume_role/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sts_assume_role/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8cb5b3eb32b87c1acba7a138fb9a444d9c1dcbd740a73a60ac2edd376b88b44b", - "format": 1 - }, - { - "name": "tests/integration/targets/sts_assume_role/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sts_assume_role/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", - "format": 1 - }, - { - "name": "tests/integration/targets/sts_assume_role/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sts_assume_role/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "272702cb0ed24cb574e8b0f13f3003bdcb3d26aa5eeb5fdcb17856e02786b678", - "format": 1 - }, - { - "name": "tests/integration/targets/sts_assume_role/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sts_assume_role/templates/policy.json.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aad7dcbd5c5a4650004b5455525bcff7ef9780b55b09bbf1b49369456ad7ae06", - "format": 1 - }, - { - "name": "tests/integration/targets/sts_assume_role/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", - "format": 1 - }, - { "name": "tests/integration/targets/sts_session_token", "ftype": "dir", "chksum_type": null, @@ -8551,7 +8698,7 @@ "name": 
"tests/integration/targets/sts_session_token/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "797a6ce7ecbd4bcce15322cf96da3d40c724ce61aaf76f239e5cdfdc1334dc58", + "chksum_sha256": "2f9b107bbc83c7c23ddf46839de59eeda2a89768d0f16a25e8427369ca5f7cfe", "format": 1 }, { @@ -8593,7 +8740,7 @@ "name": "tests/integration/targets/waf_web_acl/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "518493cfc795471221b563e655f94121d1d74adbb344507dd0b319a8e92db773", + "chksum_sha256": "65de73f34f03fd5404a791767b52ffb9c281a99546e9047e79612aa0ec25f696", "format": 1 }, { @@ -8649,7 +8796,7 @@ "name": "tests/integration/targets/wafv2/tasks/alb.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dcc31d4757a797b348bf05e2aec8ed8cd50b04b1d8c2420b3043b8bea6f5190a", + "chksum_sha256": "fd78c3784d0bb94a107e00206183ed8d532c9ee45b9da7e3e5f8ee226f2dda98", "format": 1 }, { @@ -8663,14 +8810,14 @@ "name": "tests/integration/targets/wafv2/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6162da42e4339277ba6e75a16f3c0305846f78b765ab7b434eb235c2d855b1de", + "chksum_sha256": "ce26fcde81d142e1df4fd4a578fecc539a10d23907530f0cb5de167edcfc431e", "format": 1 }, { "name": "tests/integration/targets/wafv2/tasks/rule_group.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3cb05e8025e20292761a7b7140fe94406c7d7d61c14c068845058775a94bc623", + "chksum_sha256": "3e384c797bc89155b8eda4f752cc9fb05b9e5f06f14236d01f5c2b44542edf44", "format": 1 }, { @@ -8747,7 +8894,7 @@ "name": "tests/integration/targets/wafv2_ip_set/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4ba143352356e853990e43c1b5016edde2bf5cf036575636f8c161b631790704", + "chksum_sha256": "9c78b0aee774c364d76c7d47f0dcc312f3181352f1c43869c98d33271a2f6710", "format": 1 }, { @@ -8817,7 +8964,7 @@ "name": "tests/integration/targets/wafv2_rule_group/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6e64e904f4c4eb5473040c571b31359a88a00c26b9120dfccb7dfe6b6440c165", + "chksum_sha256": "98381c4b8670f0377735ec370a9ade0c85d967ef7d6f5aa68ff2ff327ad76379", "format": 1 }, { @@ -8887,7 +9034,7 @@ "name": "tests/integration/targets/wafv2_web_acl/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7308daea89d3121825b445219272e2aa8e318cfac7ce513f80f9c9467cc23285", + "chksum_sha256": "de8d980b4ab53a80311cb5ccf6e987909497ab0046496817ebe76bf16c44c37f", "format": 1 }, { @@ -8915,14 +9062,21 @@ "name": "tests/integration/constraints.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "591bc7bcc41461d47b97991b920ce187502c20e877eb412259f6797a1a7388f2", + "chksum_sha256": "13f897a645a2679a509e2921a3aa296e516a7c43f6b18a8372cd9bfae095a4fe", "format": 1 }, { "name": "tests/integration/requirements.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "70888db5be6bfe5255337eb4a26a9e206ebc2e9bf6c3b18976514c2777fa7df9", + "chksum_sha256": "54e83176165f41d8e11335c6c7a408328692e04f45c705ce0f2bd64a0f391746", + "format": 1 + }, + { + "name": "tests/integration/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c74b1d99da40fb88dcd091936b54a61fe73c0dfcf9fe175cee4607ccb3bd8cf", "format": 1 }, { @@ -8943,42 +9097,63 @@ "name": "tests/sanity/ignore-2.11.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8720584a97c2e8e28e7c8c51a50c783f9ea031383dc2122a3681ecc507662f6d", + "chksum_sha256": 
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { "name": "tests/sanity/ignore-2.12.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8720584a97c2e8e28e7c8c51a50c783f9ea031383dc2122a3681ecc507662f6d", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { "name": "tests/sanity/ignore-2.13.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8720584a97c2e8e28e7c8c51a50c783f9ea031383dc2122a3681ecc507662f6d", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { "name": "tests/sanity/ignore-2.14.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8720584a97c2e8e28e7c8c51a50c783f9ea031383dc2122a3681ecc507662f6d", + "chksum_sha256": "c9271c547f92773ba4ed8df6eb56394c85deefb66078ae7336b285aee9d252d9", "format": 1 }, { "name": "tests/sanity/ignore-2.15.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8720584a97c2e8e28e7c8c51a50c783f9ea031383dc2122a3681ecc507662f6d", + "chksum_sha256": "c9271c547f92773ba4ed8df6eb56394c85deefb66078ae7336b285aee9d252d9", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.16.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9271c547f92773ba4ed8df6eb56394c85deefb66078ae7336b285aee9d252d9", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.17.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9271c547f92773ba4ed8df6eb56394c85deefb66078ae7336b285aee9d252d9", "format": 1 }, { "name": "tests/sanity/ignore-2.9.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "41aa5d215ac45215b5398be241ab9f33ae157cf6874cc3c8a8179bb3a84e8655", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/sanity/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71afcdc49ddb153ed00b7af0d3e15b9821d5921fe8af6447e9e8867463b28c01", "format": 1 }, { @@ -9006,21 +9181,7 @@ "name": "tests/unit/compat/builtins.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7163336aa20ba9db9643835a38c25097c8a01d558ca40869b2b4c82af25a009c", - "format": 1 - }, - { - "name": "tests/unit/compat/mock.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99", - "format": 1 - }, - { - "name": "tests/unit/compat/unittest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5401a046e5ce71fa19b6d905abd0f9bdf816c0c635f7bdda6730b3ef06e67096", + "chksum_sha256": "9f93829747b69dc5f5feafae69e9dd73bcf093425a6784b720ffafd98be2eb4d", "format": 1 }, { @@ -9041,35 +9202,35 @@ "name": "tests/unit/mock/loader.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cfe3480f0eae6d3723ee62d01d00a0e9f58fcdc082ea1d8e4836157c56d4fa95", + "chksum_sha256": "b5fc5835142013d1fface268232ec402bf512e170c5a19f787e8ebf997287209", "format": 1 }, { "name": "tests/unit/mock/path.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "245e913b4f4cd7bc334b3f4dfc851dc4ec0d297f57bba03703b987a48df9c76c", + "chksum_sha256": "43365b95bab3fd31d1a1af3429d9abd0b8be39c7a6273390982da71dc03a9633", "format": 1 }, { "name": "tests/unit/mock/procenv.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cba88dd59128f6e70be7d022fe05201689a171c307a100a19142431ab3a707dd", + "chksum_sha256": "e0fef92aba32287e1a98d3e3def0aeb99c630cc661f17c8ea4d50d2f39a8f1ce", 
"format": 1 }, { "name": "tests/unit/mock/vault_helper.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1c5bb024ac2f936a8755eca00fb6e43f2a72dac131fe07f1ed0249dc29504ee0", + "chksum_sha256": "d4396f88cc9fcfa2eb2f64cc74cb34836dfc53bd64caba1cd2c702abacfe877a", "format": 1 }, { "name": "tests/unit/mock/yaml_helper.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cd95a4807e52e9123a8d40132a5f52b75cbc1496e1a32b104b2655bf631cfee4", + "chksum_sha256": "0faf6b227f7dda38b0294634f0642370e1cf527b1d7f2383805090aceadd2c4a", "format": 1 }, { @@ -9097,7 +9258,28 @@ "name": "tests/unit/plugins/connection/test_aws_ssm.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2ee7c115d8f0af46509d68f2e6072a217004b97ca569d1fadf10e2d66ce71e4e", + "chksum_sha256": "5a11663cddcfe3a4bdb8ab0ec4eb4d2080ad4d558809a786f4e9074cc6b5f0a3", + "format": 1 + }, + { + "name": "tests/unit/plugins/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/inventory/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins/inventory/test_aws_mq.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "035408796c251f8ff8c12879dad71c963c50c0c224a2b777a218bf07b996198c", "format": 1 }, { @@ -12954,98 +13136,105 @@ "name": "tests/unit/plugins/modules/conftest.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "909818cefd5093894a41494d1e43bd625538f57821375a564c52fe4219960967", + "chksum_sha256": "81e71af2ccd3142d9920cc5556b457be917c22e53e6053ed6267a452982b4147", "format": 1 }, { "name": "tests/unit/plugins/modules/test_acm_certificate.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c8d9dc4d3b222057ea2dd88de45b4edfefe570103c25df6b9cd7791d2a5ed441", + "chksum_sha256": "8b111ce566fca4947c766db075a9b2f31080cc43ace00d12e2fddf4ede754e00", "format": 1 }, { "name": "tests/unit/plugins/modules/test_api_gateway.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5f3e50f86d71c39b638654e4beaa912bf8d872cf485d08dfa885d388e48cb635", + "chksum_sha256": "d4c41a79ca896e841e24c40e7d3f5a285eb78dd72551ae63c180f7934d6e9bdb", "format": 1 }, { "name": "tests/unit/plugins/modules/test_data_pipeline.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8b71a110087842ba2670f50c69dca631e6cab624fc6a691291242423e4a654ed", + "chksum_sha256": "a8697fb31630a20574f5f81f467adc50257040116cdcead6438410cea736df3d", "format": 1 }, { "name": "tests/unit/plugins/modules/test_directconnect_confirm_connection.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "16c2fd15a17f38f268b9502f50fdf5436531709a70122b9d6b38130e04d78aed", + "chksum_sha256": "2b5240fde9db0aa9e3e14f254b06a9972793804cf1b075d2a1b1bb028428d017", "format": 1 }, { "name": "tests/unit/plugins/modules/test_directconnect_connection.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f52f6e3aa547e323a44fd9a8b6dfffc393b9f9414d76d4c09ed0be8998fa6490", + "chksum_sha256": "81c66fbda98ccbac133e8e6fe1e9f0277e3419c129134c9cf918502871b929bc", "format": 1 }, { "name": "tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e9d9280af13327edd0d08012655c65fdc6cc2db796b83ab27542323cfbdf5b3e", + "chksum_sha256": "996521133d39909cfa46ec11427f9eeeae33de0f6c52684273508677c3a3870a", "format": 1 }, 
{ "name": "tests/unit/plugins/modules/test_directconnect_virtual_interface.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1e16f8fb2dce702bb3ff65a7bcc34bfb460c2d45965ffb1b202a814e85cfc1a5", + "chksum_sha256": "ca4e6235b712aa1fdfb3140db626d29b048a06bbe09a051820496af9f8c42acc", "format": 1 }, { "name": "tests/unit/plugins/modules/test_ec2_vpc_vpn.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6811508e0027e7f15f32983c4952c83f91f3fa0a9bb2ab8b4a8be2175d33964c", + "chksum_sha256": "94fb060fbaf50535f6ea148564a2edc422b7cbc446edecf0f75d02a992695f2c", "format": 1 }, { "name": "tests/unit/plugins/modules/test_ec2_win_password.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0baf51af954156925dc18084ae4acf860b826165fb8f0d4eebfd07cde8c3835c", + "chksum_sha256": "fea620774cb090935afe45f7ed859293fcc5bbc3d959dcb1dacd7b96dead8a6d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_opensearch.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "940d1424565b5b47244263dcccbeb3ce40fa52703b11d7abb021344684a46329", "format": 1 }, { - "name": "tests/unit/plugins/modules/test_iam_password_policy.py", + "name": "tests/unit/plugins/modules/test_redshift_cross_region_snapshots.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "49e13f05a469cee2a210d11d3014aedb7223dd26f607e2330d9a8be776f6ba72", + "chksum_sha256": "c58e54120724d9cca27440a55466fb44940420f0fabce15ba512183d34b7edcf", "format": 1 }, { - "name": "tests/unit/plugins/modules/test_opensearch.py", + "name": "tests/unit/plugins/modules/test_route53_wait.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8da1eb0aaffc76cedccfeef40a874b88ef98a5d678cccd299627b76cddc48fa1", + "chksum_sha256": "052e79e5bf916a83235def24c6ad2fef6c32239dad53ab2e597220c9ca4bea5d", "format": 1 }, { - "name": "tests/unit/plugins/modules/test_redshift_cross_region_snapshots.py", + "name": "tests/unit/plugins/modules/test_ssm_inventory_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "47804b0d95dc8868f340c09678cf8ae4cccdc2990c0b40786fa0ae6416b01f50", + "chksum_sha256": "02a8d350f832fd87c4805c66d0c4086b3854b5f84fe951ef753720c466e1015e", "format": 1 }, { "name": "tests/unit/plugins/modules/utils.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6dbf5791f5fb392bbc72347d7da850058fdd613b51966a68e1f8022f8ac69eba", + "chksum_sha256": "5c553ea4bf90da544abac8dd1bda0b8714ddb22d8a39d17d178ccd9222db688d", "format": 1 }, { @@ -13066,7 +13255,7 @@ "name": "tests/unit/constraints.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "591bc7bcc41461d47b97991b920ce187502c20e877eb412259f6797a1a7388f2", + "chksum_sha256": "bc0121f23632af60e317c297eeebd434aebe98064c9631c2a69e8e5880eb725f", "format": 1 }, { @@ -13077,6 +13266,13 @@ "format": 1 }, { + "name": "tests/unit/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71afcdc49ddb153ed00b7af0d3e15b9821d5921fe8af6447e9e8867463b28c01", + "format": 1 + }, + { "name": "tests/.gitignore", "ftype": "file", "chksum_type": "sha256", @@ -13087,7 +13283,7 @@ "name": "tests/config.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9d75ecdecbd76691b04ec2d5fcf9241a4366801e6a1e5db09785453cd429c862", + "chksum_sha256": "1c08c8ebb64b4244179f4ceee7e9074c0e0a7696f299bd9659fe09381a2eebdb", "format": 1 }, { @@ -13098,31 +13294,31 @@ "format": 1 }, { - "name": "tests/requirements.yml", + "name": ".gitignore", "ftype": "file", "chksum_type": "sha256", - 
"chksum_sha256": "66fad8a0eb762fc61d4aa6c516d876f6bb89e73cfad5c005e78c3f4a2fd91aa5", + "chksum_sha256": "554f1be4f521490671cb70f96f35a8a98c2d7f92b751ea76ce7665869b283d9a", "format": 1 }, { - "name": ".gitignore", + "name": ".yamllint", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ec8428728dab8be96951a9aa637a77e33ef67bd444a56295d2867e2426d481d9", + "chksum_sha256": "20f14c567d8ba0813a1ae58e298093a8004e4657baed321e4567de0f676beeaf", "format": 1 }, { "name": "CHANGELOG.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "64943fcf1cb90ec0ae493011dcbef073aec7f5c235aaf36e53890a60cbd15826", + "chksum_sha256": "5c20e11dfc1704180b5d197a68107a5a6092c324a99739646c42bb0e1a0dc8a4", "format": 1 }, { "name": "CONTRIBUTING.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ae3f61010c5c9821d0a566b841867f78dd594a948abac7fdc784029c5222f4fc", + "chksum_sha256": "550c8671e9531921c7c20bb2b6c926060a527f6dfc0bd9fc76b25d6a0651100a", "format": 1 }, { @@ -13136,7 +13332,7 @@ "name": "README.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "db5e1c1064e561f142f8dc83a5105353497b31e55c23bcc756e67030e728b0a6", + "chksum_sha256": "258051287346af763659d20980984c8c5689e0b5d3e6ce2bdaa1f597a2013ded", "format": 1 }, { @@ -13147,17 +13343,31 @@ "format": 1 }, { + "name": "pyproject.toml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da49e18617bd8f6350e2e92c4ef5366aa8423c1aa9de01cff6bfdd7fa604ab4e", + "format": 1 + }, + { "name": "requirements.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "69d234edeaedcedfa2e796dc5f0f9ddabad4bfb3959100d8814a07cedf702c2f", + "chksum_sha256": "2c71169e5f0cdc74b4e423519a95fe50a499c1c9163d9550ccd7cba56e9901a6", "format": 1 }, { "name": "test-requirements.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "be6aa80a131f3e7fac406bccaae2e693de33f22e82ef9bdb6e2cbbdf008c4a21", + "chksum_sha256": "5dcd51d5a5f8631fa8466420f4eb6302cec4ce4e5ea10ee3199602bd36a2bc75", + "format": 1 + }, + { + "name": "tox.ini", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1201123003e01af03ffb7cf8669ea1fc7a8ddc0bff1a181c2484d0bd0087ed5e", "format": 1 } ], diff --git a/ansible_collections/community/aws/MANIFEST.json b/ansible_collections/community/aws/MANIFEST.json index 437a04742..224e6ba06 100644 --- a/ansible_collections/community/aws/MANIFEST.json +++ b/ansible_collections/community/aws/MANIFEST.json @@ -2,7 +2,7 @@ "collection_info": { "namespace": "community", "name": "aws", - "version": "5.5.0", + "version": "7.1.0", "authors": [ "Ansible (https://github.com/ansible)" ], @@ -13,14 +13,14 @@ "cloud", "amazon" ], - "description": null, + "description": "A variety of Ansible content to help automate the management of AWS services.", "license": [], "license_file": "COPYING", "dependencies": { - "amazon.aws": ">=5.0.0" + "amazon.aws": ">=7.0.0,<8.0.0" }, "repository": "https://github.com/ansible-collections/community.aws", - "documentation": "https://ansible-collections.github.io/community.aws/branch/stable-5/collections/community/aws/index.html", + "documentation": "https://ansible-collections.github.io/community.aws/branch/stable-7/collections/community/aws/index.html", "homepage": "https://github.com/ansible-collections/community.aws", "issues": "https://github.com/ansible-collections/community.aws/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc" }, @@ -28,7 +28,7 @@ "name": "FILES.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"91fe278d2365d772b572ea028dcb97117d3e98e74569adf6c63b6e7d89e8c74e", + "chksum_sha256": "efadd5485c903284fc4f80439e50ff696569304185ef6c966c442c2b5603c5c8", "format": 1 }, "format": 1 diff --git a/ansible_collections/community/aws/README.md b/ansible_collections/community/aws/README.md index 1a4bf3232..ba02cf6db 100644 --- a/ansible_collections/community/aws/README.md +++ b/ansible_collections/community/aws/README.md @@ -1,12 +1,12 @@ # Community AWS Collection -The Ansible Community AWS collection includes a variety of Ansible content to help automate the management of AWS instances. This collection is maintained by the Ansible community. +The Ansible Community AWS collection includes a variety of Ansible content to help automate the management of AWS services. This collection is maintained by the Ansible community. AWS related modules and plugins supported by the Ansible Cloud team are in the [amazon.aws](https://github.com/ansible-collections/amazon.aws) collection. ## Ansible version compatibility -Tested with the Ansible Core 2.12, and 2.13 releases, and the current development version of Ansible. Ansible Core versions before 2.11.0 are not supported. In particular, Ansible Core 2.10 and Ansible 2.9 are not supported. +Tested with the Ansible Core >= 2.12.0 versions, and the current development version of Ansible. Ansible Core versions before 2.12.0 are not supported. Use community.aws 4.x.y if you are using Ansible 2.9 or Ansible Core 2.10. @@ -14,24 +14,39 @@ Use community.aws 4.x.y if you are using Ansible 2.9 or Ansible Core 2.10. This collection depends on the AWS SDK for Python (Boto3 and Botocore). Due to the [AWS SDK Python Support Policy](https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) -this collection requires Python 3.6 or greater. - -Amazon have also announced the end of support for -[Python less than 3.7](https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/). -As such support for Python less than 3.7 by this collection has been deprecated and will be removed in a release -after 2023-05-31. +this collection requires Python 3.7 or greater. + +Amazon have also announced the planned end of support for +[Python less than 3.8](https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/). +As such support for Python less than 3.8 will be removed in a release after 2024-12-01. + +<!--- +### End of Support by Python Versions: + +| Python Version | AWS SDK | Collection | +| -------------- | -------- | ---------- | +| 2.7 | July 2021 | Release 2.0.0 (September 2021) | +| 3.4 | February 2021 | Release 1.0.0 (June 2020) | +| 3.5 | February 2021 | Release 2.0.0 (September 2021) | +| 3.6 | May 2022 | Release 7.0.0 (November 2023) | +| 3.7 | December 2023 | *After December 2024* | +| 3.8 | April 2025 | *After April 2026* | +| 3.9 | April 2026 | *After April 2027* | +| 3.10 | April 2027 | *After April 2028* | +| 3.11 | April 2028 | *After April 2029* | +---> ## AWS SDK version compatibility Starting with the 2.0.0 releases of amazon.aws and community.aws, it is generally the collection's policy to support the versions of `botocore` and `boto3` that were released 12 months prior to the most recent major collection release, following semantic versioning (for example, 2.0.0, 3.0.0). 
-Version 5.0.0 of this collection supports `boto3 >= 1.18.0` and `botocore >= 1.21.0` +Version 7.0.0 of this collection supports `boto3 >= 1.26.0` and `botocore >= 1.29.0` All support for the original AWS SDK `boto` was removed in release 4.0.0. ## Included content <!--start collection content--> -See the complete list of collection content in the [Plugin Index](https://ansible-collections.github.io/community.aws/branch/stable-5/collections/community/aws/index.html#plugin-index). +See the complete list of collection content in the [Plugin Index](https://ansible-collections.github.io/community.aws/branch/stable-7/collections/community/aws/index.html#plugin-index). <!--end collection content--> @@ -90,7 +105,7 @@ You can either call modules by their Fully Qualified Collection Name (FQCN), suc ### See Also: -* [Amazon Web Services Guide](https://docs.ansible.com/ansible/latest/scenario_guides/guide_aws.html) +* [Amazon Web Services Guide](https://docs.ansible.com/ansible/latest/collections/amazon/aws/docsite/guide_aws.html) * [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details. ## Contributing to this collection @@ -106,12 +121,12 @@ You can also join us on: - [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html) - Details on contributing to Ansible - [Contributing to Collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections) - How to check out collection git repositories correctly -- [Guidelines for Ansible Amazon AWS module development](https://docs.ansible.com/ansible/latest/dev_guide/platforms/aws_guidelines.html) +- [Guidelines for Ansible Amazon AWS module development](https://docs.ansible.com/ansible/latest/collections/amazon/aws/docsite/dev_guidelines.html) - [Getting Started With AWS Ansible Module Development and Community Contribution](https://www.ansible.com/blog/getting-started-with-aws-ansible-module-development) ## Release notes -See the [rendered changelog](https://ansible-collections.github.io/community.aws/branch/stable-5/collections/community/aws/docsite/CHANGELOG.html) or the [raw generated changelog](https://github.com/ansible-collections/community.aws/tree/stable-5/CHANGELOG.rst). +See the [rendered changelog](https://ansible-collections.github.io/community.aws/branch/stable-7/collections/community/aws/docsite/CHANGELOG.html) or the [raw generated changelog](https://github.com/ansible-collections/community.aws/tree/stable-7/CHANGELOG.rst). 
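As a minimal sketch of the FQCN usage the README refers to above, a task addressing a module from this collection might look like the following; the queue name and region are illustrative values, not defaults.

```yaml
# Minimal sketch: calling a community.aws module by its Fully Qualified
# Collection Name (FQCN). The queue name and region are illustrative.
- name: Ensure an SQS queue exists
  community.aws.sqs_queue:
    name: example-queue
    region: us-east-1
    state: present
```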
## Roadmap diff --git a/ansible_collections/community/aws/changelogs/changelog.yaml b/ansible_collections/community/aws/changelogs/changelog.yaml index ee3f2b9ab..ea65a58be 100644 --- a/ansible_collections/community/aws/changelogs/changelog.yaml +++ b/ansible_collections/community/aws/changelogs/changelog.yaml @@ -28,7 +28,7 @@ releases: - cloudwatchlogs_log_group - Fix a KeyError when updating a log group that does not have a retention period (https://github.com/ansible/ansible/issues/47945) - cloudwatchlogs_log_group_info - remove limitation of max 50 results - - ec2_asg - Ensure "wait" is honored during replace operations + - ec2_asg - Ensure ``wait`` is honored during replace operations - ec2_launch_template - Update output to include latest_version and default_version, matching the documentation - ec2_transit_gateway - Use AWSRetry before ClientError is handled when describing @@ -1581,6 +1581,73 @@ releases: - 970-redshift_info-boto-import.yml - 977-add-backoff-logic-elb-info.yml release_date: '2022-03-30' + 2.5.0: + changes: + bugfixes: + - ecs_service - add missing change detect of ``health_check_grace_period_seconds`` + parameter (https://github.com/ansible-collections/community.aws/pull/1145). + - ecs_service - fix broken compare of ``task_definition`` that results always + in a changed task (https://github.com/ansible-collections/community.aws/pull/1145). + - ecs_service - fix validation for ``placement_constraints``. It's possible + to use ``distinctInstance`` placement constraint now (https://github.com/ansible-collections/community.aws/issues/1058) + - ecs_taskdefinition - fix broken change detect of ``launch_type`` parameter + (https://github.com/ansible-collections/community.aws/pull/1145). + - execute_lambda - fix check mode and update RETURN documentation (https://github.com/ansible-collections/community.aws/pull/1115). + - iam_policy - require one of ``policy_document`` and ``policy_json`` when state + is present to prevent MalformedPolicyDocumentException from being thrown (https://github.com/ansible-collections/community.aws/pull/1093). + - s3_lifecycle - add support of value *0* for ``transition_days`` (https://github.com/ansible-collections/community.aws/pull/1077). + - s3_lifecycle - check that configuration is complete before returning (https://github.com/ansible-collections/community.aws/pull/1085). + minor_changes: + - iam_policy - update broken examples and add RETURN section to documentation; + add extra integration tests for idempotency check mode runs (https://github.com/ansible-collections/community.aws/pull/1093). + - iam_role - delete inline policies prior to deleting role (https://github.com/ansible-collections/community.aws/pull/1054). + - iam_role - remove global vars and refactor accordingly (https://github.com/ansible-collections/community.aws/pull/1054). + release_summary: This is the minor release of the ``community.aws`` collection. 
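To illustrate the ``iam_policy`` requirement noted in the 2.5.0 bugfixes above (one of ``policy_document`` or ``policy_json`` must be supplied when ``state`` is ``present``), a minimal sketch follows; the role name, policy name, and ``policy.json`` file are illustrative assumptions.

```yaml
# Minimal sketch: with state=present, iam_policy requires one of
# policy_document or policy_json; here policy_json is supplied from a file.
# The role name, policy name, and policy.json path are illustrative.
- name: Attach an inline policy to an IAM role
  community.aws.iam_policy:
    iam_type: role
    iam_name: example-role
    policy_name: example-inline-policy
    policy_json: "{{ lookup('file', 'policy.json') }}"
    state: present
```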
+ fragments: + - 0000-ecs_taskdefinition_fix.yml + - 1054-iam_role-delete-inline-policies-and-refactor.yml + - 1077-s3_lifecycle-transition-days-zero.yml + - 1085-s3_lifecycle-check-that-configuration-is-complete-before-returning.yml + - 1093-iam_policy-update-docs-and-add-required_if.yml + - 1115-execute_lambda-checkmode-fix-update-return-docs.yml + - 1300-ecs_service-placementConstraints.yml + - 2.5.0.yml + release_date: '2022-05-30' + 2.6.0: + changes: + bugfixes: + - ecs_service - fix broken change detect of ``health_check_grace_period_seconds`` + parameter when not specified (https://github.com/ansible-collections/community.aws/pull/1212). + - ecs_service - use default cluster name of ``default`` when not input (https://github.com/ansible-collections/community.aws/pull/1212). + - ecs_task - don't require ``cluster`` and use name of ``default`` when not input + (https://github.com/ansible-collections/community.aws/pull/1212). + - wafv2_ip_set - fix bug where incorrect changed state was returned when only + changing the description (https://github.com/ansible-collections/community.aws/pull/1211). + minor_changes: + - ecs_service - ``deployment_circuit_breaker`` has been added as a supported + feature (https://github.com/ansible-collections/community.aws/pull/1215). + - ecs_service - add ``service`` alias to address the ecs service name with the + same parameter as the ecs_service_info module is doing (https://github.com/ansible-collections/community.aws/pull/1187). + - ecs_service_info - add ``name`` alias to address the ecs service name with + the same parameter as the ecs_service module is doing (https://github.com/ansible-collections/community.aws/pull/1187). + release_summary: 'This is the last planned 2.x release of the ``community.aws`` + collection. + + Consider upgrading to the latest version of ``community.aws`` soon.' + fragments: + - 0001-ecs-service-aliases.yml + - 1211-wafv2_ip_set-description.yml + - 1212-ecs_service-fix-broken-change-detect.yml + - 1215-ecs-service-deployment-circuit-breaker-support.yml + - 2.6.0.yml + release_date: '2022-06-22' + 2.6.1: + changes: + release_summary: Bump collection from 2.6.0 to 2.6.1 due to a publishing error + with 2.6.0. This release supersedes 2.6.0 entirely; users should skip 2.6.0. + fragments: + - 261_increase.yml + release_date: '2022-06-22' 3.0.0: changes: breaking_changes: @@ -1988,7 +2055,7 @@ releases: - route53_info - The CamelCase return values for ``HostedZones``, ``ResourceRecordSets``, and ``HealthChecks`` have been deprecated, in the future release you must use snake_case return values ``hosted_zones``, ``resource_record_sets``, and - ``health_checks`` instead respectively". + ``health_checks`` instead respectively. minor_changes: - aws_codebuild - add support for ``purge_tags`` parameter (https://github.com/ansible-collections/community.aws/pull/1221). - aws_codebuild - add the ``resource_tags`` parameter which takes the dictionary @@ -2063,6 +2130,31 @@ releases: - 580-vpc_peer-idempotency.yml - 645-aws_config_aggregator-fix-update-and-idempotency.yml release_date: '2022-08-03' + 3.6.0: + changes: + bugfixes: + - ec2_placement_group - Handle a potential race condition during the creation + of a new Placement Group (https://github.com/ansible-collections/community.aws/pull/1477). + - s3_lifecycle - fix bug when deleting rules with an empty prefix (https://github.com/ansible-collections/community.aws/pull/1398).
+ minor_changes: + - autoscaling_group_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - cloudfront_distribution - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - cloudfront_origin_access_identity - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - cloudtrail - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - ec2_asg_lifecycle_hook - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - ec2_vpc_nacl - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - redshift - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - s3_bucket_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + release_summary: 'Following the release of community.aws 5.0.0, 3.6.0 is a bugfix + release and the final planned release for the 3.x series. + + ' + fragments: + - 1398-s3_lifecycle-no-prefix.yml + - 1410-linting.yml + - RELEASE-3.6.0.yml + - ec2_placement_group_race_on_create.yaml + release_date: '2022-10-06' 4.0.0: changes: breaking_changes: @@ -2328,7 +2420,7 @@ releases: - aws_kms_info - the unused and deprecated ``keys_attr`` parameter has been removed (https://github.com/ansible-collections/amazon.aws/pull/1172). - data_pipeline - the ``version`` option has always been ignored and has been - removed (https://github.com/ansible-collections/community.aws/pull/1160" + removed (https://github.com/ansible-collections/community.aws/pull/1160). - ec2_eip - The ``wait_timeout`` option has been removed. It has always been ignored by the module (https://github.com/ansible-collections/community.aws/pull/1159). - ec2_lc - the ``associate_public_ip_address`` option has been removed. It has @@ -2485,7 +2577,7 @@ releases: - route53_info - The CamelCase return values for ``DelegationSets``, ``CheckerIpRanges``, and ``HealthCheck`` have been deprecated, in the future release you must use snake_case return values ``delegation_sets``, ``checker_ip_ranges``, and ``health_check`` - instead respectively" (https://github.com/ansible-collections/community.aws/pull/1322). + instead respectively (https://github.com/ansible-collections/community.aws/pull/1322). minor_changes: - aws_glue_connection - added new ``raw_connection_parameters`` return key which doesn't snake case the connection parameters (https://github.com/ansible-collections/community.aws/pull/518). @@ -2590,6 +2682,102 @@ releases: fragments: - 1398-s3_lifecycle-no-prefix.yml release_date: '2022-09-14' + 4.3.0: + changes: + bugfixes: + - ec2_placement_group - Handle a potential race condition during the creation + of a new Placement Group (https://github.com/ansible-collections/community.aws/pull/1477). + - rds_cluster - fixes bug where specifying an rds cluster parameter group raises + a ``KeyError`` (https://github.com/ansible-collections/community.aws/pull/1417). + minor_changes: + - autoscaling_group_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - cloudfront_distribution - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - cloudfront_origin_access_identity - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+ - cloudtrail - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - ec2_vpc_nacl - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - eks_fargate_profile - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - redshift - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + - s3_bucket_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410). + release_summary: 'The community.aws 4.3.0 release includes a number of minor + bug fixes and improvements. + + Following the release of amazon.aws 5.0.0, backports to the 4.x series will + be limited to security issues and bugfixes.' + fragments: + - 1410-linting.yml + - 1417-cluster-param-group-keyerror.yml + - RELEASE-4.3.0.yml + - ec2_placement_group_race_on_create.yaml + release_date: '2022-10-06' + 4.4.0: + changes: + bugfixes: + - aws_ssm - fixes S3 bucket region detection by ensuring boto client has correct + credentials and exists in correct partition (https://github.com/ansible-collections/community.aws/pull/1428). + - ecs_ecr - fix a ``RepositoryNotFound`` exception when trying to create repositories + in check mode (https://github.com/ansible-collections/community.aws/pull/1550). + - opensearch - Fix cluster creation when using advanced security options (https://github.com/ansible-collections/community.aws/pull/1613). + minor_changes: + - elasticache_parameter_group - add ``redis6.x`` group family on the module + input choices (https://github.com/ansible-collections/community.aws/pull/1476). + release_summary: 'This is the minor release of the ``community.aws`` collection. + + This changelog contains all changes to the modules and plugins in this collection + + that have been made after the previous release.' + fragments: + - 1428-aws-ssm-missing-credentials.yml + - 1476-add-redis6x-cache-parameter-group-family.yml + - 1550-ecs_ecr-RepositoryNotFound.yml + - 1565-healthCheck-docs.yml + - 1579-ec2_vpc_vgw-deleted.yml + - 1613-opensearch.yml + - 20221026-pytest-forked.yml + - 4.4.0.yml + release_date: '2022-12-08' + 4.5.0: + changes: + bugfixes: + - aws_ssm - fix ``invalid literal for int`` error on some operating systems + (https://github.com/ansible-collections/community.aws/issues/113). + - ecs_service - respect ``placement_constraints`` for existing ecs services + (https://github.com/ansible-collections/community.aws/pull/1601). + - s3_lifecycle - Module no longer calls ``put_lifecycle_configuration`` if there + is no change. (https://github.com/ansible-collections/community.aws/issues/1624) + - ssm_parameter - Fix a ``KeyError`` when adding a description to an existing + parameter (https://github.com/ansible-collections/community.aws/issues/1471). + minor_changes: + - ecs_service - support load balancer update for existing ecs services (https://github.com/ansible-collections/community.aws/pull/1625). + - iam_role - Drop deprecation warning, because the standard value for purge + parameters is ``true`` (https://github.com/ansible-collections/community.aws/pull/1636). + release_summary: This is the minor release of the ``community.aws`` collection.
+ fragments: + - 1601-ecs_service-support_constraints_and_strategy_update.yml + - 1624-s3-lifecycle-idempotent.yml + - 1625-ecs_service-support-load-balancer-update.yml + - 1627-ssm_parameter-KeyError-when-adding-description.yml + - 20230112-aws_ssm-tests.yml + - 4.5.0.yml + - 558-ssm_connection-invalid-literal.yml + - iam_role_purge_policy.yml + release_date: '2023-01-25' + 4.5.1: + changes: + bugfixes: + - sns_topic - avoid fetching attributes from subscribers when not setting them, + this can cause permissions issues (https://github.com/ansible-collections/community.aws/pull/1418). + release_summary: 'This release contains a minor bugfix for the ``sns_topic`` + module as well as corrections to the documentation for various modules. This + is the last planned release of the 4.x series. + + ' + fragments: + - 1576-defaults.yml + - 1757-config_rule-evaluation-mode.yml + - iam_access_key_docs_fix.yml + - release-notes.yml + - sns_topic-cross-account.yml + release_date: '2023-05-05' 5.0.0: changes: breaking_changes: @@ -2751,7 +2939,7 @@ releases: - elb_network_lb - fixes bug where ``ip_address_type`` in return value was not updated (https://github.com/ansible-collections/community.aws/pull/1365). - rds_cluster - fixes bug where specifying an rds cluster parameter group raises - a `KeyError` (https://github.com/ansible-collections/community.aws/pull/1417). + a ``KeyError`` (https://github.com/ansible-collections/community.aws/pull/1417). - s3_sync - fix etag generation when running in FIPS mode (https://github.com/ansible-collections/community.aws/issues/757). deprecated_features: - community.aws collection - due to the AWS SDKs announcing the end of support @@ -3004,8 +3192,8 @@ releases: bugfixes: - aws_ssm - fixes S3 bucket region detection by ensuring boto client has correct credentials and exists in correct partition (https://github.com/ansible-collections/community.aws/pull/1428). - - ec2_snapshot_copy - including tags caused the erorr "Tag specification resource - type must have a value". Fix sets the ResourceType to snapshot to resolve + - ec2_snapshot_copy - including tags caused the error ``Tag specification resource + type must have a value``. Fix sets the ResourceType to snapshot to resolve this issue (https://github.com/ansible-collections/community.aws/pull/1419). - ecs_ecr - fix a ``RepositoryNotFound`` exception when trying to create repositories in check mode (https://github.com/ansible-collections/community.aws/pull/1550). @@ -3227,3 +3415,322 @@ releases: - elasticache_info-ignore-CacheClusterNotFound-when-reading-tags.yaml - release-notes.yml release_date: '2023-05-05' + 5.5.1: + changes: + bugfixes: + - cloudfront_distribution - no longer crashes when waiting for completion of + creation (https://github.com/ansible-collections/community.aws/issues/255). + - cloudfront_distribution - now honours the ``enabled`` setting (https://github.com/ansible-collections/community.aws/issues/1823). + release_summary: This release brings several bugfixes. + fragments: + - 1823-cloudfront_distribution_always_created_enabled.yml + - 20230627-ci-fixup.yml + - 20230701-ci-fixup.yml + - 20230704-github_workflows.yml + - 255-cloudfront_distribution_create_wait_crash.yml + - release_summary.yml + - test-reqs.yml + - tests-requirements.yml + release_date: '2023-07-05' + 6.0.0: + changes: + breaking_changes: + - The community.aws collection has dropped support for ``botocore<1.25.0`` and + ``boto3<1.22.0``.
Most modules will continue to work with older versions of + the AWS SDK, however compatibility with older versions of the SDK is not guaranteed + and will not be tested. When using older versions of the SDK a warning will + be emitted by Ansible (https://github.com/ansible-collections/community.aws/pull/1743). + - aws_ssm - the AWS SSM plugin was incorrectly prepending ``sudo`` to most commands. This + behaviour was incorrect and has been removed. To execute commands as a specific + user, including the ``root`` user, the ``become`` and ``become_user`` directives + should be used. See the `Ansible documentation for more information <https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_privilege_escalation.html>`_ + (https://github.com/ansible-collections/community.aws/issues/853). + - codebuild_project - ``tags`` parameter now accepts a dict representing the + tags, rather than the boto3 format (https://github.com/ansible-collections/community.aws/pull/1643). + bugfixes: + - opensearch_info - Fix the name of the domain_name key in the example (https://github.com/ansible-collections/community.aws/pull/1811). + - ses_identity - fix clearing notification topic (https://github.com/ansible-collections/community.aws/issues/150). + deprecated_features: + - community.aws collection - due to the AWS SDKs Python support policies (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) + support for Python less than 3.8 by this collection is expected to be removed + in a release after 2024-12-01 (https://github.com/ansible-collections/community.aws/pull/1743). + - community.aws collection - due to the AWS SDKs announcing the end of support + for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) + support for Python less than 3.7 by this collection has been deprecated and + will be removed in release 7.0.0. (https://github.com/ansible-collections/community.aws/pull/1743). + minor_changes: + - The ``black`` code formatter has been run across the collection to improve + code consistency (https://github.com/ansible-collections/community.aws/pull/1784). + - aws_config_delivery_channel - add support for encrypted objects in S3 via + KMS key (https://github.com/ansible-collections/community.aws/pull/1786). + - aws_ssm - Updated the documentation to explicitly mention that the ``ansible_user`` + and ``remote_user`` variables are not supported by the plugin (https://github.com/ansible-collections/community.aws/pull/1682). + - bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/community.aws/pull/1810). + - cloudfront_distribution - add ``http3`` support via parameter value ``http2and3`` + for parameter ``http_version`` (https://github.com/ansible-collections/community.aws/pull/1753). + - cloudfront_distribution - add ``origin_shield`` options (https://github.com/ansible-collections/community.aws/pull/1557). + - cloudfront_distribution - documented ``connection_attempts`` and ``connection_timeout``; + the module was already capable of using them. + - community.aws - updated document fragments based on changes in amazon.aws + (https://github.com/ansible-collections/community.aws/pull/1738). + - community.aws - updated imports based on changes in amazon.aws (https://github.com/ansible-collections/community.aws/pull/1738).
+ - ecs_ecr - use ``compare_policies`` when comparing lifecycle policies instead + of naive ``sort_json_policy_dict`` comparisons (https://github.com/ansible-collections/community.aws/pull/1551). + - elasticache - Use the ``cache.t3.small`` node type in the example. ``cache.m1.small`` + is not deprecated. + - minor code fixes and enable integration tests for modules cloudfront_distribution, + cloudfront_invalidation and cloudfront_origin_access_identity (https://github.com/ansible-collections/community.aws/pull/1596). + - module_utils.botocore - Add Ansible AWS User-Agent identification (https://github.com/ansible-collections/community.aws/pull/1632). + - wafv2_rule_group_info - remove unused and deprecated ``state`` parameter (https://github.com/ansible-collections/community.aws/pull/1555). + release_summary: 'This release brings some new plugins and features. Several + bugfixes, breaking changes and deprecated features are also included. + + The community.aws collection has dropped support for ``botocore<1.25.0`` and + ``boto3<1.22.0``. + + Support for Python 3.6 has also been dropped. + + ' + fragments: + - 1435-connection-attempt-timeout.yml + - 1551-ecs_ecr-sort_json_policy.yml + - 1555-wafv2_rule_group_info.yml + - 1557-cloudfront-add-origin-shield.yml + - 1634-networkfirewall_rule_group-tests.yml + - 1643-codebuild_project.yml + - 1730-ses-identity-fix-clearing-notification-topic.yml + - 1738-headers.yml + - 1753-cloudfront-add-http3.yml + - 1784-black.yml + - 1798-aws_ssm-black.yml + - 20221103-autoscaling_scheduled_action.yml + - 20230307-blueprint.yml + - 20230423-update_readme_and_runtime.yml + - 20230424-config-delivery-channel.yml + - 6.0.0-release.yml + - 853-aws_ssm-sudo.yml + - ansible-user-agent-identification.yaml + - botocore-tests.yml + - cloudfront_integration_tests_activate.yaml + - elasticache-use-up-to-date-node-type-in-example.yaml + - fstring-1.yml + - integration_tests_max_duration_increase.yaml + - opensearch_info_example_key_name.yaml + - python37.yml + - release-6-botocore.yml + - version_added.yml + modules: + - description: Manage an AWS VPC Carrier gateway + name: ec2_carrier_gateway + namespace: '' + - description: Gather information about carrier gateways in AWS + name: ec2_carrier_gateway_info + namespace: '' + - description: Creates snapshots of AWS Lightsail instances + name: lightsail_snapshot + namespace: '' + - description: MQ broker management + name: mq_broker + namespace: '' + - description: Update Amazon MQ broker configuration + name: mq_broker_config + namespace: '' + - description: Retrieve MQ Broker details + name: mq_broker_info + namespace: '' + - description: Manage users in existing Amazon MQ broker + name: mq_user + namespace: '' + - description: List users of an Amazon MQ broker + name: mq_user_info + namespace: '' + - description: Get SSM inventory information for EC2 instance + name: ssm_inventory_info + namespace: '' + release_date: '2023-05-10' + 6.1.0: + changes: + bugfixes: + - batch_compute_environment - fixed incorrect handling of Gov Cloud ARNs in + ``compute_environment_name`` parameter (https://github.com/ansible-collections/community.aws/issues/1846). + - cloudfront_distribution - the origins now recognise S3 domains that include + a region part (https://github.com/ansible-collections/community.aws/issues/1819). + - cloudfront_distribution - no longer crashes when waiting for completion of + creation (https://github.com/ansible-collections/community.aws/issues/255).
+ - cloudfront_distribution - now honours the ``enabled`` setting (https://github.com/ansible-collections/community.aws/issues/1823). + - dynamodb_table - secondary indexes are now created (https://github.com/ansible-collections/community.aws/issues/1825). + - ec2_launch_template - fixed incorrect handling of Gov Cloud ARNs in ``compute_environment_name`` + parameter (https://github.com/ansible-collections/community.aws/issues/1846). + - elasticache_info - remove hard coded use of ``aws`` partition (https://github.com/ansible-collections/community.aws/issues/1846). + - iam_role - fixed incorrect rejection of Gov Cloud ARNs in ``boundary`` parameter + (https://github.com/ansible-collections/community.aws/issues/1846). + - msk_cluster - remove hard coded use of ``aws`` partition (https://github.com/ansible-collections/community.aws/issues/1846). + - redshift - fixed hard coded use of ``aws`` partition (https://github.com/ansible-collections/community.aws/issues/1846). + minor_changes: + - dynamodb_table - added waiter when updating indexes to avoid concurrency issues + (https://github.com/ansible-collections/community.aws/pull/1866). + - dynamodb_table - increased default timeout based on time to update indexes + in CI (https://github.com/ansible-collections/community.aws/pull/1866). + - iam_group - refactored ARN validation handling (https://github.com/ansible-collections/community.aws/pull/1848). + - iam_role - refactored ARN validation handling (https://github.com/ansible-collections/community.aws/pull/1848). + - sns_topic - refactored ARN validation handling (https://github.com/ansible-collections/community.aws/pull/1848). + release_summary: This release brings a new inventory plugin, some new features, + and several bugfixes. + fragments: + - 1819-cloudfront-distribution-s3-domain-recognise.yaml + - 1823-cloudfront_distribution_always_created_enabled.yml + - 1825-dynamodb-table-no-secondary-indexes.yml + - 1846-arn.yml + - 20230531-mq-fix_fqdn.yml + - 20230613-black.yml + - 20230627-ci-fixup.yml + - 20230701-ci-fixup.yml + - 20230702-dynamodb_waiter.yml + - 255-cloudfront_distribution_create_wait_crash.yml + - 6.0.0.yml + - release_summary.yml + - test-reqs.yml + - tests-requirements.yml + plugins: + inventory: + - description: MQ broker inventory source + name: aws_mq + namespace: null + release_date: '2023-07-07' + 6.2.0: + changes: + bugfixes: + - Remove ``apigateway`` and ``apigateway_deployment`` from meta/runtime.yml + (https://github.com/ansible-collections/community.aws/pull/1905). + minor_changes: + - api_gateway - add support for parameters ``name``, ``lookup``, ``tags`` and + ``purge_tags`` (https://github.com/ansible-collections/community.aws/pull/1845). + - ec2_vpc_vpn - add support for connecting VPNs to a transit gateway (https://github.com/ansible-collections/community.aws/pull/1877). + release_summary: This release includes some new features for the ``community.aws.ec2_vpc_vpn`` + and ``community.aws.api_gateway`` modules. + fragments: + - 20230620-api_gateway-add-optional-name.yml + - 20230804-update-meta-runtime.yaml + - release_summary.yml + - transit_gateway_to_vpn.yaml + release_date: '2023-08-04' + 7.0.0: + changes: + breaking_changes: + - The community.aws collection has dropped support for ``botocore<1.29.0`` and + ``boto3<1.26.0``. Most modules will continue to work with older versions of + the AWS SDK, however compatibility with older versions of the SDK is not guaranteed + and will not be tested.
When using older versions of the SDK a warning will + be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1763). + - aws_region_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.aws_region_info``. + - aws_s3_bucket_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.aws_s3_bucket_info``. + - community.aws collection - due to the AWS SDKs announcing the end of support + for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) + support for Python less than 3.7 by this collection was deprecated in + release 6.0.0 and removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1763). + - iam_access_key - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.iam_access_key``. + - iam_access_key_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.iam_access_key_info``. + - iam_group - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.iam_group`` (https://github.com/ansible-collections/community.aws/pull/1945). + - iam_managed_policy - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.iam_managed_policy`` (https://github.com/ansible-collections/community.aws/pull/1954). + - iam_mfa_device_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.iam_mfa_device_info`` (https://github.com/ansible-collections/community.aws/pull/1953). + - iam_password_policy - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.iam_password_policy``. + - iam_role - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/community.aws/pull/1948). + - iam_role_info - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.iam_role_info`` (https://github.com/ansible-collections/community.aws/pull/1948). + - s3_bucket_info - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.s3_bucket_info``. + - sts_assume_role - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.sts_assume_role``.
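As a sketch of the porting step these migration entries call for, a playbook task only needs its FQCN updated; the role name and trust policy file below are illustrative assumptions, not values from the changelog.

```yaml
# Before community.aws 7.0.0 this task would have been addressed as
# community.aws.iam_role; from 7.0.0 onwards the module lives in amazon.aws.
# The role name and trust-policy.json path are illustrative.
- name: Ensure the application role exists
  amazon.aws.iam_role:
    name: example-app-role
    assume_role_policy_document: "{{ lookup('file', 'trust-policy.json') }}"
    state: present
```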
+ bugfixes: + - mq_broker - ensure broker is created with ``tags`` when passed (https://github.com/ansible-collections/community.aws/issues/1832). + - opensearch - Don't try to read a non-existing key from the domain config (https://github.com/ansible-collections/community.aws/pull/1910). + minor_changes: + - api_gateway - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962). + - api_gateway_info - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962). + - community.aws collection - apply isort code formatting to ensure consistent + formatting of code (https://github.com/ansible-collections/community.aws/pull/1962). + - ecs_taskdefinition - Add parameter ``runtime_platform`` (https://github.com/ansible-collections/community.aws/issues/1891). + - eks_nodegroup - ensure wait also waits for deletion to complete when ``wait==True`` + (https://github.com/ansible-collections/community.aws/pull/1994). + release_summary: This release includes some new features, bugfixes and breaking + changes. Several modules have been migrated to amazon.aws and the Fully Qualified + Collection Name for these modules needs to be updated. The community.aws collection + has dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Due to the + AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/), + support for Python less than 3.7 by this collection was deprecated in release + 6.0.0 and removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1763). + fragments: + - 1832-mq_broker_tags.yml + - 1891_ecs-task-definition-add-runtime-platform.yml + - 1904-route53_wait.yml + - 1962-isort.yml + - 20230623-black-cloudfront.yml + - 20230702-isort.yml + - 20230801-fix-linters.yml + - 20230906-galaxy.yml + - 20230906-route53_wait.yml + - 20230908-alias-cleanup.yml + - 20230915_migrate_iam_role_and_iam_role_info.yml + - 7.0.0-dev0.yml + - botocore.yml + - botocore_params-cleanup.yml + - eks_nodegroup-integration-wait-delete.yml + - galaxy_importer.yml + - migrate_aws_region_info.yml + - migrate_iam_access_key.yml + - migrate_iam_group.yml + - migrate_iam_managed_policy.yml + - migrate_iam_mfa_device_info.yml + - migrate_iam_password_policy.yml + - migrate_s3_bucket_info.yml + - migrate_sts_assume_role.yml + - opensearch_domainconfig_no_options.yaml + - python37.yml + - release_summary.yml + - workflow-requirements.yml + release_date: '2023-11-06' + 7.1.0: + changes: + bugfixes: + - aws_ssm - disable `enable-bracketed-paste` to fix issue with Amazon Linux + 2023 and other OSes (https://github.com/ansible-collections/community.aws/issues/1756). + minor_changes: + - aws_ssm - Updated the documentation to explicitly state that an S3 bucket + is required, the behavior of the files in that bucket, and requirements around + that. (https://github.com/ansible-collections/community.aws/issues/1775). + - cloudfront_distribution - added support for ``cache_policy_id`` and ``origin_request_policy_id`` + for behaviors (https://github.com/ansible-collections/community.aws/pull/1589). + - mq_broker - add support to wait for broker state via ``wait`` and ``wait_timeout`` + parameter values (https://github.com/ansible-collections/community.aws/pull/1879).
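A sketch of how the new ``wait``/``wait_timeout`` options noted in the 7.1.0 entry above might be used; the broker name and timeout are illustrative, and the other parameters a real broker creation needs (engine type, instance type, users, and so on) are omitted for brevity.

```yaml
# Minimal sketch: ask mq_broker to block until the broker reaches a stable
# state. Broker name and timeout are illustrative; a real creation also
# needs engine/instance/user parameters, omitted here.
- name: Create an MQ broker and wait for it to become available
  community.aws.mq_broker:
    broker_name: example-broker
    state: present
    wait: true
    wait_timeout: 900
```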
+ release_summary: This release includes new features for the ``cloudfront_distribution`` + and ``mq_broker`` modules, as well as a bugfix for the ``aws_ssm`` connection + plugin needed when connecting to hosts with Bash 5.1.0 and later. + fragments: + - 1589-cloudfront_distribution-add-policies.yml + - 1775-aws_ssm-s3-docs.yaml + - 1839-disable-bracketed-paste.yml + - 1879-mq_broker-add-wait.yml + - release.yml + - ssm-fedora34.yml + release_date: '2024-01-10' diff --git a/ansible_collections/community/aws/changelogs/config.yaml b/ansible_collections/community/aws/changelogs/config.yaml index df8a7220c..6ac07f935 100644 --- a/ansible_collections/community/aws/changelogs/config.yaml +++ b/ansible_collections/community/aws/changelogs/config.yaml @@ -1,3 +1,4 @@ +--- changelog_filename_template: ../CHANGELOG.rst changelog_filename_version_depth: 0 changes_file: changelog.yaml @@ -9,21 +10,21 @@ notesdir: fragments prelude_section_name: release_summary prelude_section_title: Release Summary sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes / Porting Guide -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues title: community.aws trivial_section_name: trivial diff --git a/ansible_collections/community/aws/docs/docsite/links.yml b/ansible_collections/community/aws/docs/docsite/links.yml index 3369b815f..b38e48055 100644 --- a/ansible_collections/community/aws/docs/docsite/links.yml +++ b/ansible_collections/community/aws/docs/docsite/links.yml @@ -7,7 +7,7 @@ # functionality for your collection. edit_on_github: repository: ansible-collections/community.aws - branch: main + branch: stable-7 # If your collection root (the directory containing galaxy.yml) does not coincide with your # repository's root, you have to specify the path to the collection root here. For example, # if the collection root is in a subdirectory ansible_collections/community/REPO_NAME diff --git a/ansible_collections/community/aws/docs/docsite/rst/CHANGELOG.rst b/ansible_collections/community/aws/docs/docsite/rst/CHANGELOG.rst index 7b5761863..651b7c763 100644 --- a/ansible_collections/community/aws/docs/docsite/rst/CHANGELOG.rst +++ b/ansible_collections/community/aws/docs/docsite/rst/CHANGELOG.rst @@ -5,6 +5,200 @@ community.aws Release Notes .. contents:: Topics +v7.1.0 +====== + +Release Summary +--------------- + +This release includes new features for the ``cloudfront_distribution`` and ``mq_broker`` modules, as well as a bugfix for the ``aws_ssm`` connection plugin needed when connecting to hosts with Bash 5.1.0 and later. + +Minor Changes +------------- + +- aws_ssm - Updated the documentation to explicitly state that an S3 bucket is required, the behavior of the files in that bucket, and requirements around that. (https://github.com/ansible-collections/community.aws/issues/1775). 
+- cloudfront_distribution - added support for ``cache_policy_id`` and ``origin_request_policy_id`` for behaviors (https://github.com/ansible-collections/community.aws/pull/1589)
+- mq_broker - add support to wait for broker state via ``wait`` and ``wait_timeout`` parameter values (https://github.com/ansible-collections/community.aws/pull/1879).
+
+Bugfixes
+--------
+
+- aws_ssm - disable `enable-bracketed-paste` to fix issue with amazon linux 2023 and other OSes (https://github.com/ansible-collections/community.aws/issues/1756)
+
+v7.0.0
+======
+
+Release Summary
+---------------
+
+This release includes some new features, bugfixes and breaking changes. Several modules have been migrated to amazon.aws and the Fully Qualified Collection Name for these modules needs to be updated. The community.aws collection has dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/), support for Python less than 3.7 by this collection was deprecated in release 6.0.0 and removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1763).
+
+Minor Changes
+-------------
+
+- api_gateway - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+- api_gateway_info - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+- community.aws collection - apply isort code formatting to ensure consistent formatting of code (https://github.com/ansible-collections/community.aws/pull/1962)
+- ecs_taskdefinition - Add parameter ``runtime_platform`` (https://github.com/ansible-collections/community.aws/issues/1891).
+- eks_nodegroup - ensure wait also waits for deletion to complete when ``wait==True`` (https://github.com/ansible-collections/community.aws/pull/1994).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- The community.aws collection has dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1763).
+- aws_region_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_region_info``.
+- aws_s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_s3_bucket_info``.
+- community.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection was deprecated in release 6.0.0 and removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1763).
+- iam_access_key - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key``.
+- iam_access_key_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key_info``.
+- iam_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_group`` (https://github.com/ansible-collections/community.aws/pull/1945).
+- iam_managed_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_managed_policy`` (https://github.com/ansible-collections/community.aws/pull/1954).
+- iam_mfa_device_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_mfa_device_info`` (https://github.com/ansible-collections/community.aws/pull/1953).
+- iam_password_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_password_policy``.
+- iam_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/community.aws/pull/1948).
+- iam_role_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role_info`` (https://github.com/ansible-collections/community.aws/pull/1948).
+- s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.s3_bucket_info``.
+- sts_assume_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.sts_assume_role``.
+
+Bugfixes
+--------
+
+- mq_broker - ensure broker is created with ``tags`` when passed (https://github.com/ansible-collections/community.aws/issues/1832).
+- opensearch - Don't try to read a non existing key from the domain config (https://github.com/ansible-collections/community.aws/pull/1910).
+
+v6.2.0
+======
+
+Release Summary
+---------------
+
+This release includes some new features for the ``community.aws.ec2_vpc_vpn`` and ``community.aws.api_gateway`` modules.
+
+Minor Changes
+-------------
+
+- api_gateway - add support for parameters ``name``, ``lookup``, ``tags`` and ``purge_tags`` (https://github.com/ansible-collections/community.aws/pull/1845).
+- ec2_vpc_vpn - add support for connecting VPNs to a transit gateway (https://github.com/ansible-collections/community.aws/pull/1877).
+
+Bugfixes
+--------
+
+- Remove ``apigateway`` and ``apigateway_deployment`` from meta/runtime.yml (https://github.com/ansible-collections/community.aws/pull/1905).
+
+v6.1.0
+======
+
+Release Summary
+---------------
+
+This release brings a new inventory plugin, some new features, and several bugfixes.
+
+Minor Changes
+-------------
+
+- dynamodb_table - added waiter when updating indexes to avoid concurrency issues (https://github.com/ansible-collections/community.aws/pull/1866).
+- dynamodb_table - increased default timeout based on time to update indexes in CI (https://github.com/ansible-collections/community.aws/pull/1866).
+- iam_group - refactored ARN validation handling (https://github.com/ansible-collections/community.aws/pull/1848).
+- iam_role - refactored ARN validation handling (https://github.com/ansible-collections/community.aws/pull/1848).
+- sns_topic - refactored ARN validation handling (https://github.com/ansible-collections/community.aws/pull/1848).
+
+Bugfixes
+--------
+
+- batch_compute_environment - fixed incorrect handling of Gov Cloud ARNs in ``compute_environment_name`` parameter (https://github.com/ansible-collections/community.aws/issues/1846).
+- cloudfront_distribution - the origins now recognise S3 domains with a region part (https://github.com/ansible-collections/community.aws/issues/1819).
+- cloudfront_distribution - no longer crashes when waiting for completion of creation (https://github.com/ansible-collections/community.aws/issues/255).
+- cloudfront_distribution - now honours the ``enabled`` setting (https://github.com/ansible-collections/community.aws/issues/1823).
+- dynamodb_table - secondary indexes are now created (https://github.com/ansible-collections/community.aws/issues/1825).
+- ec2_launch_template - fixed incorrect handling of Gov Cloud ARNs in ``compute_environment_name`` parameter (https://github.com/ansible-collections/community.aws/issues/1846).
+- elasticache_info - remove hard coded use of ``aws`` partition (https://github.com/ansible-collections/community.aws/issues/1846).
+- iam_role - fixed incorrect rejection of Gov Cloud ARNs in ``boundary`` parameter (https://github.com/ansible-collections/community.aws/issues/1846).
+- msk_cluster - remove hard coded use of ``aws`` partition (https://github.com/ansible-collections/community.aws/issues/1846).
+- redshift - fixed hard coded use of ``aws`` partition (https://github.com/ansible-collections/community.aws/issues/1846).
+
+New Plugins
+-----------
+
+Inventory
+~~~~~~~~~
+
+- aws_mq - MQ broker inventory source
+
+v6.0.0
+======
+
+Release Summary
+---------------
+
+This release brings some new plugins and features. Several bugfixes, breaking changes and deprecated features are also included.
+The community.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``.
+Support for Python 3.6 has also been dropped.
+
+
+Minor Changes
+-------------
+
+- The ``black`` code formatter has been run across the collection to improve code consistency (https://github.com/ansible-collections/community.aws/pull/1784).
+- aws_config_delivery_channel - add support for encrypted objects in S3 via KMS key (https://github.com/ansible-collections/community.aws/pull/1786).
+- aws_ssm - Updated the documentation to explicitly mention that the ``ansible_user`` and ``remote_user`` variables are not supported by the plugin (https://github.com/ansible-collections/community.aws/pull/1682).
+- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/community.aws/pull/1810).
+- cloudfront_distribution - add ``http3`` support via parameter value ``http2and3`` for parameter ``http_version`` (https://github.com/ansible-collections/community.aws/pull/1753).
+- cloudfront_distribution - add ``origin_shield`` options (https://github.com/ansible-collections/community.aws/pull/1557).
+- cloudfront_distribution - documented ``connection_attempts`` and ``connection_timeout``; the module was already capable of using them
+- community.aws - updated document fragments based on changes in amazon.aws (https://github.com/ansible-collections/community.aws/pull/1738).
+- community.aws - updated imports based on changes in amazon.aws (https://github.com/ansible-collections/community.aws/pull/1738).
+- ecs_ecr - use ``compare_policies`` when comparing lifecycle policies instead of naive ``sort_json_policy_dict`` comparisons (https://github.com/ansible-collections/community.aws/pull/1551).
+- elasticache - Use the ``cache.t3.small`` node type in the example. ``cache.m1.small`` is now deprecated.
+- minor code fixes and enable integration tests for modules cloudfront_distribution, cloudfront_invalidation and cloudfront_origin_access_identity (https://github.com/ansible-collections/community.aws/pull/1596).
+- module_utils.botocore - Add Ansible AWS User-Agent identification (https://github.com/ansible-collections/community.aws/pull/1632).
+- wafv2_rule_group_info - remove unused and deprecated ``state`` parameter (https://github.com/ansible-collections/community.aws/pull/1555).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- The community.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/community.aws/pull/1743).
+- aws_ssm - the AWS SSM plugin was incorrectly prepending ``sudo`` to most commands. This behaviour has been removed. To execute commands as a specific user, including the ``root`` user, the ``become`` and ``become_user`` directives should be used. See the `Ansible documentation for more information <https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_privilege_escalation.html>`_ (https://github.com/ansible-collections/community.aws/issues/853).
+- codebuild_project - ``tags`` parameter now accepts a dict representing the tags, rather than the boto3 format (https://github.com/ansible-collections/community.aws/pull/1643).
+
+Deprecated Features
+-------------------
+
+- community.aws collection - due to the AWS SDKs Python support policies (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.8 by this collection is expected to be removed in a release after 2024-12-01 (https://github.com/ansible-collections/community.aws/pull/1743).
+- community.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection has been deprecated and will be removed in release 7.0.0. (https://github.com/ansible-collections/community.aws/pull/1743).
+
+Bugfixes
+--------
+
+- opensearch_info - Fix the name of the domain_name key in the example (https://github.com/ansible-collections/community.aws/pull/1811).
+- ses_identity - fix clearing notification topic (https://github.com/ansible-collections/community.aws/issues/150).
+
+New Modules
+-----------
+
+- ec2_carrier_gateway - Manage an AWS VPC Carrier gateway
+- ec2_carrier_gateway_info - Gather information about carrier gateways in AWS
+- lightsail_snapshot - Creates snapshots of AWS Lightsail instances
+- mq_broker - MQ broker management
+- mq_broker_config - Update Amazon MQ broker configuration
+- mq_broker_info - Retrieve MQ Broker details
+- mq_user - Manage users in existing Amazon MQ broker
+- mq_user_info - List users of an Amazon MQ broker
+- ssm_inventory_info - Get SSM inventory information for EC2 instance
+
+v5.5.1
+======
+
+Release Summary
+---------------
+
+This release brings several bugfixes.
+
+Bugfixes
+--------
+
+- cloudfront_distribution - no longer crashes when waiting for completion of creation (https://github.com/ansible-collections/community.aws/issues/255).
+- cloudfront_distribution - now honours the ``enabled`` setting (https://github.com/ansible-collections/community.aws/issues/1823).
+
 v5.5.0
 ======
 
@@ -156,7 +350,7 @@ Bugfixes
 --------
 
 - aws_ssm - fixes S3 bucket region detection by ensuring boto client has correct credentials and exists in correct partition (https://github.com/ansible-collections/community.aws/pull/1428).
-- ec2_snapshot_copy - including tags caused the erorr "Tag specification resource type must have a value". Fix sets the ResourceType to snapshot to resolve this issue (https://github.com/ansible-collections/community.aws/pull/1419).
+- ec2_snapshot_copy - including tags caused the error ``Tag specification resource type must have a value``. Fix sets the ResourceType to snapshot to resolve this issue (https://github.com/ansible-collections/community.aws/pull/1419).
 - ecs_ecr - fix a ``RepositoryNotFound`` exception when trying to create repositories in check mode (https://github.com/ansible-collections/community.aws/pull/1550).
 - opensearch - Fix cluster creation when using advanced security options (https://github.com/ansible-collections/community.aws/pull/1613).
@@ -321,7 +515,7 @@ Bugfixes
 
 - ec2_placement_group - Handle a potential race condition during the creation of a new Placement Group (https://github.com/ansible-collections/community.aws/pull/1477).
 - elb_network_lb - fixes bug where ``ip_address_type`` in return value was not updated (https://github.com/ansible-collections/community.aws/pull/1365).
-- rds_cluster - fixes bug where specifiying an rds cluster parameter group raises a `KeyError` (https://github.com/ansible-collections/community.aws/pull/1417).
+- rds_cluster - fixes bug where specifying an rds cluster parameter group raises a ``KeyError`` (https://github.com/ansible-collections/community.aws/pull/1417).
 - s3_sync - fix etag generation when running in FIPS mode (https://github.com/ansible-collections/community.aws/issues/757).
 
 New Modules
@@ -329,6 +523,91 @@ New Modules
 
 - accessanalyzer_validate_policy_info - Performs validation of IAM policies
 
+v4.5.1
+======
+
+Release Summary
+---------------
+
+This release contains a minor bugfix for the ``sns_topic`` module as well as corrections to the documentation for various modules. This is the last planned release of the 4.x series.
+
+
+Bugfixes
+--------
+
+- sns_topic - avoid fetching attributes from subscribers when not setting them, as this can cause permissions issues (https://github.com/ansible-collections/community.aws/pull/1418).
+
+v4.5.0
+======
+
+Release Summary
+---------------
+
+This is a minor release of the ``community.aws`` collection.
+
+Minor Changes
+-------------
+
+- ecs_service - support load balancer update for existing ecs services (https://github.com/ansible-collections/community.aws/pull/1625).
+- iam_role - Drop deprecation warning, because the standard value for purge parameters is ``true`` (https://github.com/ansible-collections/community.aws/pull/1636).
+
+Bugfixes
+--------
+
+- aws_ssm - fix ``invalid literal for int`` error on some operating systems (https://github.com/ansible-collections/community.aws/issues/113).
+- ecs_service - respect ``placement_constraints`` for existing ecs services (https://github.com/ansible-collections/community.aws/pull/1601).
+- s3_lifecycle - Module no longer calls ``put_lifecycle_configuration`` if there is no change. (https://github.com/ansible-collections/community.aws/issues/1624)
+- ssm_parameter - Fix a ``KeyError`` when adding a description to an existing parameter (https://github.com/ansible-collections/community.aws/issues/1471).
+
+v4.4.0
+======
+
+Release Summary
+---------------
+
+This is a minor release of the ``community.aws`` collection.
+This changelog contains all changes to the modules and plugins in this collection
+that have been made after the previous release.
+
+Minor Changes
+-------------
+
+- elasticache_parameter_group - add ``redis6.x`` group family on the module input choices (https://github.com/ansible-collections/community.aws/pull/1476).
+
+Bugfixes
+--------
+
+- aws_ssm - fixes S3 bucket region detection by ensuring boto client has correct credentials and exists in correct partition (https://github.com/ansible-collections/community.aws/pull/1428).
+- ecs_ecr - fix a ``RepositoryNotFound`` exception when trying to create repositories in check mode (https://github.com/ansible-collections/community.aws/pull/1550).
+- opensearch - Fix cluster creation when using advanced security options (https://github.com/ansible-collections/community.aws/pull/1613).
+
+v4.3.0
+======
+
+Release Summary
+---------------
+
+The community.aws 4.3.0 release includes a number of minor bug fixes and improvements.
+Following the release of amazon.aws 5.0.0, backports to the 4.x series will be limited to security issues and bugfixes.
+
+Minor Changes
+-------------
+
+- autoscaling_group_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudfront_distribution - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudfront_origin_access_identity - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudtrail - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- ec2_vpc_nacl - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- eks_fargate_profile - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- redshift - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- s3_bucket_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+
+Bugfixes
+--------
+
+- ec2_placement_group - Handle a potential race condition during the creation of a new Placement Group (https://github.com/ansible-collections/community.aws/pull/1477).
+- rds_cluster - fixes bug where specifying an rds cluster parameter group raises a ``KeyError`` (https://github.com/ansible-collections/community.aws/pull/1417).
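Several of the ``KeyError`` fixes in the 4.x releases above (``rds_cluster`` with a parameter group, ``ssm_parameter`` when adding a description) follow the same pattern: a module indexes an optional key of an AWS API response that is not always present. A minimal sketch of the bug and the defensive fix in Python; the DescribeDBClusters-style response shape below is illustrative, not the modules' actual code:

# Illustrative boto3-style response; the optional 'DBClusterParameterGroup'
# key is an assumption for this sketch and may be absent in real responses.
cluster = {"DBClusterIdentifier": "demo", "Status": "available"}

# Fragile: raises KeyError whenever the optional key is absent.
# parameter_group = cluster["DBClusterParameterGroup"]

# Defensive: dict.get() returns None (or a chosen default) instead of raising.
parameter_group = cluster.get("DBClusterParameterGroup")
if parameter_group is not None:
    print(f"parameter group: {parameter_group}")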
+
 v4.2.0
 ======
 
@@ -385,7 +664,7 @@ Deprecated Features
 
 - community.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection has been deprecated and will be removed in a release after 2023-05-31 (https://github.com/ansible-collections/community.aws/pull/1361).
 - iam_policy - the ``policies`` return value has been renamed ``policy_names`` and will be removed in a release after 2024-08-01, both values are currently returned (https://github.com/ansible-collections/community.aws/pull/1375).
 - lambda_info - The ``function`` return key returns a dictionary of dictionaries and has been deprecated. In a release after 2025-01-01, this key will be removed in favor of ``functions``, which returns a list of dictionaries (https://github.com/ansible-collections/community.aws/pull/1239).
-- route53_info - The CamelCase return values for ``DelegationSets``, ``CheckerIpRanges``, and ``HealthCheck`` have been deprecated, in the future release you must use snake_case return values ``delegation_sets``, ``checker_ip_ranges``, and ``health_check`` instead respectively" (https://github.com/ansible-collections/community.aws/pull/1322).
+- route53_info - The CamelCase return values for ``DelegationSets``, ``CheckerIpRanges``, and ``HealthCheck`` have been deprecated, in a future release you must use the snake_case return values ``delegation_sets``, ``checker_ip_ranges``, and ``health_check`` respectively (https://github.com/ansible-collections/community.aws/pull/1322).
 
 Bugfixes
 --------
@@ -548,7 +827,7 @@ Removed Features (previously deprecated)
 ----------------------------------------
 
 - aws_kms_info - the unused and deprecated ``keys_attr`` parameter has been removed (https://github.com/ansible-collections/amazon.aws/pull/1172).
-- data_pipeline - the ``version`` option has always been ignored and has been removed (https://github.com/ansible-collections/community.aws/pull/1160"
+- data_pipeline - the ``version`` option has always been ignored and has been removed (https://github.com/ansible-collections/community.aws/pull/1160).
 - ec2_eip - The ``wait_timeout`` option has been removed. It has always been ignored by the module (https://github.com/ansible-collections/community.aws/pull/1159).
 - ec2_lc - the ``associate_public_ip_address`` option has been removed. It has always been ignored by the module (https://github.com/ansible-collections/community.aws/pull/1158).
 - ec2_metric_alarm - support for using the ``<=``, ``<``, ``>`` and ``>=`` operators for comparison has been dropped. Please use ``LessThanOrEqualToThreshold``, ``LessThanThreshold``, ``GreaterThanThreshold`` or ``GreaterThanOrEqualToThreshold`` instead (https://github.com/ansible-collections/amazon.aws/pull/1164).
@@ -602,6 +881,33 @@ New Modules
 
 - opensearch_info - obtain information about one or more OpenSearch or ElasticSearch domain
 - rds_cluster_snapshot - Manage Amazon RDS snapshots of DB clusters
 
+v3.6.0
+======
+
+Release Summary
+---------------
+
+Following the release of community.aws 5.0.0, 3.6.0 is a bugfix release and the final planned release for the 3.x series.
+
+
+Minor Changes
+-------------
+
+- autoscaling_group_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudfront_distribution - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudfront_origin_access_identity - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- cloudtrail - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- ec2_asg_lifecycle_hook - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- ec2_vpc_nacl - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- redshift - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+- s3_bucket_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
+
+Bugfixes
+--------
+
+- ec2_placement_group - Handle a potential race condition during the creation of a new Placement Group (https://github.com/ansible-collections/community.aws/pull/1477).
+- s3_lifecycle - fix bug when deleting rules with an empty prefix (https://github.com/ansible-collections/community.aws/pull/1398).
+
 v3.5.0
 ======
 
@@ -649,7 +955,7 @@ Deprecated Features
 -------------------
 
 - aws_codebuild - The ``tags`` parameter currently uses a non-standard format and has been deprecated. In release 6.0.0 this parameter will accept a simple key/value pair dictionary instead of the current list of dictionaries. It is recommended to migrate to using the resource_tags parameter which already accepts the simple dictionary format (https://github.com/ansible-collections/community.aws/pull/1221).
-- route53_info - The CamelCase return values for ``HostedZones``, ``ResourceRecordSets``, and ``HealthChecks`` have been deprecated, in the future release you must use snake_case return values ``hosted_zones``, ``resource_record_sets``, and ``health_checks`` instead respectively".
+- route53_info - The CamelCase return values for ``HostedZones``, ``ResourceRecordSets``, and ``HealthChecks`` have been deprecated, in a future release you must use the snake_case return values ``hosted_zones``, ``resource_record_sets``, and ``health_checks`` respectively.
 
 Bugfixes
 --------
@@ -914,6 +1220,65 @@ Bugfixes
 
 - aws_eks - Fix EKS cluster creation with short names (https://github.com/ansible-collections/community.aws/pull/818).
 
+v2.6.1
+======
+
+Release Summary
+---------------
+
+Bump collection from 2.6.0 to 2.6.1 due to a publishing error with 2.6.0. This release supersedes 2.6.0 entirely; users should skip 2.6.0.
+
+v2.6.0
+======
+
+Release Summary
+---------------
+
+This is the last planned 2.x release of the ``community.aws`` collection.
+Consider upgrading to the latest version of ``community.aws`` soon.
+
+Minor Changes
+-------------
+
+- ecs_service - ``deployment_circuit_breaker`` has been added as a supported feature (https://github.com/ansible-collections/community.aws/pull/1215).
+- ecs_service - add ``service`` alias to address the ecs service name with the same parameter as the ecs_service_info module is doing (https://github.com/ansible-collections/community.aws/pull/1187).
+- ecs_service_info - add ``name`` alias to address the ecs service name with the same parameter as the ecs_service module is doing (https://github.com/ansible-collections/community.aws/pull/1187).
+
+Bugfixes
+--------
+
+- ecs_service - fix broken change detection of the ``health_check_grace_period_seconds`` parameter when not specified (https://github.com/ansible-collections/community.aws/pull/1212).
+- ecs_service - use the default cluster name ``default`` when none is specified (https://github.com/ansible-collections/community.aws/pull/1212).
+- ecs_task - don't require ``cluster`` and use the name ``default`` when none is specified (https://github.com/ansible-collections/community.aws/pull/1212).
+- wafv2_ip_set - fix bug where incorrect changed state was returned when only changing the description (https://github.com/ansible-collections/community.aws/pull/1211).
+
+v2.5.0
+======
+
+Release Summary
+---------------
+
+This is a minor release of the ``community.aws`` collection.
+
+Minor Changes
+-------------
+
+- iam_policy - update broken examples and add RETURN section to documentation; add extra integration tests for idempotency check mode runs (https://github.com/ansible-collections/community.aws/pull/1093).
+- iam_role - delete inline policies prior to deleting role (https://github.com/ansible-collections/community.aws/pull/1054).
+- iam_role - remove global vars and refactor accordingly (https://github.com/ansible-collections/community.aws/pull/1054).
+
+Bugfixes
+--------
+
+- ecs_service - add missing change detection of the ``health_check_grace_period_seconds`` parameter (https://github.com/ansible-collections/community.aws/pull/1145).
+- ecs_service - fix broken comparison of ``task_definition`` that always resulted in a changed task (https://github.com/ansible-collections/community.aws/pull/1145).
+- ecs_service - fix validation for ``placement_constraints``. It is now possible to use the ``distinctInstance`` placement constraint (https://github.com/ansible-collections/community.aws/issues/1058)
+- ecs_taskdefinition - fix broken change detection of the ``launch_type`` parameter (https://github.com/ansible-collections/community.aws/pull/1145).
+- execute_lambda - fix check mode and update RETURN documentation (https://github.com/ansible-collections/community.aws/pull/1115).
+- iam_policy - require one of ``policy_document`` and ``policy_json`` when state is present to prevent MalformedPolicyDocumentException from being thrown (https://github.com/ansible-collections/community.aws/pull/1093).
+- s3_lifecycle - add support for the value *0* for ``transition_days`` (https://github.com/ansible-collections/community.aws/pull/1077).
+- s3_lifecycle - check that configuration is complete before returning (https://github.com/ansible-collections/community.aws/pull/1085).
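The ``transition_days`` entry above hints at a classic truthiness bug: ``0`` is falsy in Python, so a bare ``if value:`` guard silently drops a legitimate value of zero days. A minimal sketch of the pattern; the parameter names mirror the module options, but the logic is illustrative only and is not the module's actual code:

def build_transition(params):
    """Build an S3 lifecycle transition dict from module-style parameters."""
    transition = {}
    # A buggy guard would be `if params.get("transition_days"):`,
    # which skips the legitimate value 0 because 0 is falsy.
    if params.get("transition_days") is not None:
        transition["Days"] = params["transition_days"]
    if params.get("storage_class"):
        transition["StorageClass"] = params["storage_class"].upper()
    return transition

print(build_transition({"transition_days": 0, "storage_class": "glacier"}))
# {'Days': 0, 'StorageClass': 'GLACIER'} - a truthy check would have dropped 'Days'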
+ v2.4.0 ====== @@ -1534,7 +1899,7 @@ Bugfixes - cloudfront_distribution - Always add field_level_encryption_id to cache behaviour to match AWS requirements - cloudwatchlogs_log_group - Fix a KeyError when updating a log group that does not have a retention period (https://github.com/ansible/ansible/issues/47945) - cloudwatchlogs_log_group_info - remove limitation of max 50 results -- ec2_asg - Ensure "wait" is honored during replace operations +- ec2_asg - Ensure ``wait`` is honored during replace operations - ec2_launch_template - Update output to include latest_version and default_version, matching the documentation - ec2_transit_gateway - Use AWSRetry before ClientError is handled when describing transit gateways - ec2_transit_gateway - fixed issue where auto_attach set to yes was not being honored (https://github.com/ansible/ansible/issues/61907) diff --git a/ansible_collections/community/aws/meta/runtime.yml b/ansible_collections/community/aws/meta/runtime.yml index 6fd39ee0f..5d05436df 100644 --- a/ansible_collections/community/aws/meta/runtime.yml +++ b/ansible_collections/community/aws/meta/runtime.yml @@ -1,223 +1,221 @@ --- -requires_ansible: '>=2.11.0' +requires_ansible: '>=2.12.0' action_groups: aws: - - accessanalyzer_validate_policy_info - - acm_certificate - - acm_certificate_info - - api_gateway - - api_gateway_domain - - application_autoscaling_policy - - autoscaling_complete_lifecycle_action - - autoscaling_instance_refresh_info - - autoscaling_instance_refresh - - autoscaling_launch_config_find - - autoscaling_launch_config_info - - autoscaling_launch_config - - autoscaling_lifecycle_hook - - autoscaling_policy - - autoscaling_scheduled_action - - aws_acm - - aws_acm_info - - aws_api_gateway - - aws_api_gateway_domain - - aws_application_scaling_policy - - aws_batch_compute_environment - - aws_batch_job_definition - - aws_batch_job_queue - - aws_codebuild - - aws_codecommit - - aws_codepipeline - - aws_config_aggregation_authorization - - aws_config_aggregator - - aws_config_delivery_channel - - aws_config_recorder - - aws_config_rule - - aws_direct_connect_confirm_connection - - aws_direct_connect_connection - - aws_direct_connect_gateway - - aws_direct_connect_link_aggregation_group - - aws_direct_connect_virtual_interface - - aws_eks_cluster - - aws_elasticbeanstalk_app - - aws_glue_connection - - aws_glue_crawler - - aws_glue_job - - aws_inspector_target - - aws_msk_cluster - - aws_msk_config - - aws_region_info - - aws_s3_bucket_info - - aws_s3_cors - - aws_secret - - aws_ses_identity - - aws_ses_identity_policy - - aws_ses_rule_set - - aws_sgw_info - - aws_ssm_parameter_store - - aws_step_functions_state_machine - - aws_step_functions_state_machine_execution - - aws_waf_condition - - aws_waf_info - - aws_waf_rule - - aws_waf_web_acl - - batch_compute_environment - - batch_job_definition - - batch_job_queue - - cloudformation_exports_info - - cloudformation_stack_set - - cloudfront_distribution - - cloudfront_distribution_info - - cloudfront_info - - cloudfront_invalidation - - cloudfront_origin_access_identity - - cloudfront_response_headers_policy - - codebuild_project - - codecommit_repository - - codepipeline - - config_aggregation_authorization - - config_aggregator - - config_delivery_channel - - config_recorder - - config_rule - - data_pipeline - - directconnect_confirm_connection - - directconnect_connection - - directconnect_gateway - - directconnect_link_aggregation_group - - directconnect_virtual_interface - - dms_endpoint - - 
dms_replication_subnet_group - - dynamodb_table - - dynamodb_ttl - - ec2_ami_copy - - ec2_asg - - ec2_asg_info - - ec2_asg_scheduled_action - - ec2_asg_instance_refresh - - ec2_asg_instance_refresh_info - - ec2_asg_lifecycle_hook - - ec2_customer_gateway - - ec2_customer_gateway_info - - ec2_elb - - ec2_launch_template - - ec2_lc - - ec2_lc_find - - ec2_lc_info - - ec2_metric_alarm - - ec2_placement_group - - ec2_placement_group_info - - ec2_scaling_policy - - ec2_snapshot_copy - - ec2_transit_gateway - - ec2_transit_gateway_info - - ec2_transit_gateway_vpc_attachment - - ec2_transit_gateway_vpc_attachment_info - - ec2_vpc_egress_igw - - ec2_vpc_nacl - - ec2_vpc_nacl_info - - ec2_vpc_peer - - ec2_vpc_peering_info - - ec2_vpc_vgw - - ec2_vpc_vgw_info - - ec2_vpc_vpn - - ec2_vpc_vpn_info - - ec2_win_password - - ecs_attribute - - ecs_cluster - - ecs_ecr - - ecs_service - - ecs_service_info - - ecs_tag - - ecs_task - - ecs_taskdefinition - - ecs_taskdefinition_info - - efs - - efs_info - - eks_cluster - - efs_tag - - eks_fargate_profile - - eks_nodegroup - - elasticbeanstalk_app - - elasticache - - elasticache_info - - elasticache_parameter_group - - elasticache_snapshot - - elasticache_subnet_group - - elb_classic_lb - - elb_classic_lb_info - - elb_instance - - elb_network_lb - - elb_target - - elb_target_group - - elb_target_group_info - - elb_target_info - - glue_connection - - glue_crawler - - glue_job - - iam_access_key - - iam_access_key_info - - iam_group - - iam_managed_policy - - iam_mfa_device_info - - iam_password_policy - - iam_role - - iam_role_info - - iam_saml_federation - - iam_server_certificate - - iam_server_certificate_info - - inspector_target - - kinesis_stream - - lightsail - - lightsail_static_ip - - msk_cluster - - msk_config - - networkfirewall - - networkfirewall_info - - networkfirewall_policy - - networkfirewall_policy_info - - networkfirewall_rule_group - - networkfirewall_rule_group_info - - opensearch - - opensearch_info - - redshift - - redshift_cross_region_snapshots - - redshift_info - - redshift_subnet_group - - s3_bucket_notification - - s3_bucket_info - - s3_cors - - s3_lifecycle - - s3_logging - - s3_metrics_configuration - - s3_sync - - s3_website - - secretsmanager_secret - - ses_identity - - ses_identity_policy - - ses_rule_set - - sns - - sns_topic - - sns_topic_info - - sqs_queue - - ssm_parameter - - stepfunctions_state_machine - - stepfunctions_state_machine_execution - - sts_assume_role - - sts_session_token - - storagegateway_info - - waf_condition - - waf_info - - waf_rule - - waf_web_acl - - wafv2_ip_set - - wafv2_ip_set_info - - wafv2_resources - - wafv2_resources_info - - wafv2_rule_group - - wafv2_rule_group_info - - wafv2_web_acl - - wafv2_web_acl_info + - accessanalyzer_validate_policy_info + - acm_certificate + - acm_certificate_info + - api_gateway + - api_gateway_domain + - api_gateway_info + - application_autoscaling_policy + - autoscaling_complete_lifecycle_action + - autoscaling_instance_refresh + - autoscaling_instance_refresh_info + - autoscaling_launch_config + - autoscaling_launch_config_find + - autoscaling_launch_config_info + - autoscaling_lifecycle_hook + - autoscaling_policy + - autoscaling_scheduled_action + - aws_acm + - aws_acm_info + - aws_api_gateway + - aws_api_gateway_domain + - aws_application_scaling_policy + - aws_batch_compute_environment + - aws_batch_job_definition + - aws_batch_job_queue + - aws_codebuild + - aws_codecommit + - aws_codepipeline + - aws_config_aggregation_authorization + - aws_config_aggregator 
+ - aws_config_delivery_channel + - aws_config_recorder + - aws_config_rule + - aws_direct_connect_confirm_connection + - aws_direct_connect_connection + - aws_direct_connect_gateway + - aws_direct_connect_link_aggregation_group + - aws_direct_connect_virtual_interface + - aws_eks_cluster + - aws_elasticbeanstalk_app + - aws_glue_connection + - aws_glue_crawler + - aws_glue_job + - aws_inspector_target + - aws_msk_cluster + - aws_msk_config + - aws_region_info + - aws_s3_cors + - aws_secret + - aws_ses_identity + - aws_ses_identity_policy + - aws_ses_rule_set + - aws_sgw_info + - aws_ssm_parameter_store + - aws_step_functions_state_machine + - aws_step_functions_state_machine_execution + - aws_waf_condition + - aws_waf_info + - aws_waf_rule + - aws_waf_web_acl + - batch_compute_environment + - batch_job_definition + - batch_job_queue + - cloudformation_exports_info + - cloudformation_stack_set + - cloudfront_distribution + - cloudfront_distribution_info + - cloudfront_info + - cloudfront_invalidation + - cloudfront_origin_access_identity + - cloudfront_response_headers_policy + - codebuild_project + - codecommit_repository + - codepipeline + - config_aggregation_authorization + - config_aggregator + - config_delivery_channel + - config_recorder + - config_rule + - data_pipeline + - directconnect_confirm_connection + - directconnect_connection + - directconnect_gateway + - directconnect_link_aggregation_group + - directconnect_virtual_interface + - dms_endpoint + - dms_replication_subnet_group + - dynamodb_table + - dynamodb_ttl + - ec2_ami_copy + - ec2_asg + - ec2_asg_info + - ec2_asg_instance_refresh + - ec2_asg_instance_refresh_info + - ec2_asg_lifecycle_hook + - ec2_asg_scheduled_action + - ec2_customer_gateway + - ec2_customer_gateway_info + - ec2_elb + - ec2_launch_template + - ec2_lc + - ec2_lc_find + - ec2_lc_info + - ec2_metric_alarm + - ec2_placement_group + - ec2_placement_group_info + - ec2_scaling_policy + - ec2_snapshot_copy + - ec2_transit_gateway + - ec2_transit_gateway_info + - ec2_transit_gateway_vpc_attachment + - ec2_transit_gateway_vpc_attachment_info + - ec2_vpc_egress_igw + - ec2_vpc_nacl + - ec2_vpc_nacl_info + - ec2_vpc_peer + - ec2_vpc_peering_info + - ec2_vpc_vgw + - ec2_vpc_vgw_info + - ec2_vpc_vpn + - ec2_vpc_vpn_info + - ec2_win_password + - ecs_attribute + - ecs_cluster + - ecs_ecr + - ecs_service + - ecs_service_info + - ecs_tag + - ecs_task + - ecs_taskdefinition + - ecs_taskdefinition_info + - efs + - efs_info + - efs_tag + - eks_cluster + - eks_fargate_profile + - eks_nodegroup + - elasticache + - elasticache_info + - elasticache_parameter_group + - elasticache_snapshot + - elasticache_subnet_group + - elasticbeanstalk_app + - elb_classic_lb + - elb_classic_lb_info + - elb_instance + - elb_network_lb + - elb_target + - elb_target_group + - elb_target_group_info + - elb_target_info + - glue_connection + - glue_crawler + - glue_job + - iam_saml_federation + - iam_server_certificate + - iam_server_certificate_info + - inspector_target + - kinesis_stream + - lightsail + - lightsail_snapshot + - lightsail_static_ip + - mq_broker + - mq_broker_config + - mq_broker_info + - mq_user + - mq_user_info + - msk_cluster + - msk_config + - networkfirewall + - networkfirewall_info + - networkfirewall_policy + - networkfirewall_policy_info + - networkfirewall_rule_group + - networkfirewall_rule_group_info + - opensearch + - opensearch_info + - redshift + - redshift_cross_region_snapshots + - redshift_info + - redshift_subnet_group + - route53_wait + - 
s3_bucket_notification + - s3_cors + - s3_lifecycle + - s3_logging + - s3_metrics_configuration + - s3_sync + - s3_website + - secretsmanager_secret + - ses_identity + - ses_identity_policy + - ses_rule_set + - sns + - sns_topic + - sns_topic_info + - sqs_queue + - ssm_inventory_info + - ssm_parameter + - stepfunctions_state_machine + - stepfunctions_state_machine_execution + - storagegateway_info + - sts_session_token + - waf_condition + - waf_info + - waf_rule + - waf_web_acl + - wafv2_ip_set + - wafv2_ip_set_info + - wafv2_resources + - wafv2_resources_info + - wafv2_rule_group + - wafv2_rule_group_info + - wafv2_web_acl + - wafv2_web_acl_info plugin_routing: modules: autoscaling_group_info: @@ -317,9 +315,11 @@ plugin_routing: aws_msk_config: # Deprecation for this alias should not *start* prior to 2024-09-01 redirect: community.aws.msk_config + aws_region_info: + redirect: amazon.aws.aws_region_info aws_s3_bucket_info: # Deprecation for this alias should not *start* prior to 2024-09-01 - redirect: community.aws.s3_bucket_info + redirect: amazon.aws.s3_bucket_info aws_s3_cors: # Deprecation for this alias should not *start* prior to 2024-09-01 redirect: community.aws.s3_cors @@ -445,10 +445,26 @@ plugin_routing: execute_lambda: # Deprecation for this alias should not *start* prior to 2024-09-01 redirect: amazon.aws.lambda_execute + iam_access_key: + redirect: amazon.aws.iam_access_key + iam_access_key_info: + redirect: amazon.aws.iam_access_key_info + iam_group: + redirect: amazon.aws.iam_group + iam_managed_policy: + redirect: amazon.aws.iam_managed_policy + iam_mfa_device_info: + redirect: amazon.aws.iam_mfa_device_info + iam_password_policy: + redirect: amazon.aws.iam_password_policy iam_policy: redirect: amazon.aws.iam_policy iam_policy_info: redirect: amazon.aws.iam_policy_info + iam_role: + redirect: amazon.aws.iam_role + iam_role_info: + redirect: amazon.aws.iam_role_info iam_user: redirect: amazon.aws.iam_user iam_user_info: @@ -499,6 +515,10 @@ plugin_routing: redirect: amazon.aws.route53_info route53_zone: redirect: amazon.aws.route53_zone + s3_bucket_info: + redirect: amazon.aws.s3_bucket_info + sts_assume_role: + redirect: amazon.aws.sts_assume_role module_utils: route53: - redirect: amazon.aws.route53 + redirect: amazon.aws.route53
\ No newline at end of file diff --git a/ansible_collections/community/aws/plugins/connection/aws_ssm.py b/ansible_collections/community/aws/plugins/connection/aws_ssm.py index 68d761c9d..5186179f4 100644 --- a/ansible_collections/community/aws/plugins/connection/aws_ssm.py +++ b/ansible_collections/community/aws/plugins/connection/aws_ssm.py @@ -1,24 +1,47 @@ -# Based on the ssh connection plugin by Michael DeHaan -# +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Pat Sharkey <psharkey@cleo.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Based on the ssh connection plugin by Michael DeHaan -DOCUMENTATION = ''' -author: -- Pat Sharkey (@psharkey) <psharkey@cleo.com> -- HanumanthaRao MVL (@hanumantharaomvl) <hanumanth@flux7.com> -- Gaurav Ashtikar (@gau1991) <gaurav.ashtikar@flux7.com> +DOCUMENTATION = r""" name: aws_ssm -short_description: execute via AWS Systems Manager +author: + - Pat Sharkey (@psharkey) <psharkey@cleo.com> + - HanumanthaRao MVL (@hanumantharaomvl) <hanumanth@flux7.com> + - Gaurav Ashtikar (@gau1991) <gaurav.ashtikar@flux7.com> + +short_description: connect to EC2 instances via AWS Systems Manager description: -- This connection plugin allows ansible to execute tasks on an EC2 instance via the aws ssm CLI. + - This connection plugin allows Ansible to execute tasks on an EC2 instance via an AWS SSM Session. +notes: + - The C(community.aws.aws_ssm) connection plugin does not support using the ``remote_user`` and + ``ansible_user`` variables to configure the remote user. The ``become_user`` parameter should + be used to configure which user to run commands as. Remote commands will often default to + running as the ``ssm-agent`` user, however this will also depend on how SSM has been configured. + - This plugin requires an S3 bucket to send files to/from the remote instance. This is required even for modules + which do not explicitly send files (such as the C(shell) or C(command) modules), because Ansible sends over the C(.py) files of the module itself, via S3. + - Files sent via S3 will be named in S3 with the EC2 host ID (e.g. C(i-123abc/)) as the prefix. + - The files in S3 will be deleted by the end of the playbook run. If the play is terminated ungracefully, the files may remain in the bucket. + If the bucket has versioning enabled, the files will remain in version history. If your tasks involve sending secrets to/from the remote instance + (e.g. within a C(shell) command, or a SQL password in the C(community.postgresql.postgresql_query) module) then those passwords will be included in + plaintext in those files in S3 indefinitely, visible to anyone with access to that bucket. Therefore it is recommended to use a bucket with versioning + disabled/suspended. + - The files in S3 will be deleted even if the C(keep_remote_files) setting is C(true). + requirements: -- The remote EC2 instance must be running the AWS Systems Manager Agent (SSM Agent). -- The control machine must have the aws session manager plugin installed. -- The remote EC2 linux instance must have the curl installed. + - The remote EC2 instance must be running the AWS Systems Manager Agent (SSM Agent). + U(https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) + - The control machine must have the AWS session manager plugin installed. 
+ U(https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) + - The remote EC2 Linux instance must have curl installed. + - The remote EC2 Linux instance and the controller both need network connectivity to S3. + - The remote instance does not require IAM credentials for S3. This module will generate a presigned URL for S3 from the controller, + and then will pass that URL to the target over SSM, telling the target to download/upload from S3 with C(curl). + - The controller requires IAM permissions to upload, download and delete files from the specified S3 bucket. This includes + `s3:GetObject`, `s3:PutObject`, `s3:ListBucket`, `s3:DeleteObject` and `s3:GetBucketLocation`. + options: access_key_id: description: The STS access key to use when connecting via session-manager. @@ -99,7 +122,11 @@ options: vars: - name: ansible_aws_ssm_bucket_sse_kms_key_id ssm_document: - description: SSM document to use when connecting. + description: + - SSM Session document to use when connecting. + - To configure the remote_user (when C(become=False), it is possible to use an SSM Session + document and define the C(runAsEnabled) and C(runAsDefaultUser) parameters. See also + U(https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-schema.html) vars: - name: ansible_aws_ssm_document version_added: 5.2.0 @@ -115,10 +142,10 @@ options: version_added: 5.2.0 vars: - name: ansible_aws_ssm_s3_addressing_style -''' - -EXAMPLES = r''' +""" +EXAMPLES = r""" +--- # Wait for SSM Agent to be available on the Instance - name: Wait for connection to be available vars: @@ -171,17 +198,19 @@ EXAMPLES = r''' path: C:\Windows\temp state: directory +--- + # Making use of Dynamic Inventory Plugin # ======================================= -# aws_ec2.yml (Dynamic Inventory - Linux) -# This will return the Instance IDs matching the filter -#plugin: aws_ec2 -#regions: -# - us-east-1 -#hostnames: -# - instance-id -#filters: -# tag:SSMTag: ssmlinux +# # aws_ec2.yml (Dynamic Inventory - Linux) +# plugin: aws_ec2 +# regions: +# - us-east-1 +# hostnames: +# - instance-id +# # This will return the Instances with the tag "SSMTag" set to "ssmlinux" +# filters: +# tag:SSMTag: ssmlinux # ----------------------- - name: install aws-cli hosts: all @@ -191,20 +220,23 @@ EXAMPLES = r''' ansible_aws_ssm_bucket_name: nameofthebucket ansible_aws_ssm_region: us-east-1 tasks: - - name: aws-cli - raw: yum install -y awscli - tags: aws-cli + - name: aws-cli + raw: yum install -y awscli + tags: aws-cli + +--- + # Execution: ansible-playbook linux.yaml -i aws_ec2.yml -# The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection. # ===================================================== -# aws_ec2.yml (Dynamic Inventory - Windows) -#plugin: aws_ec2 -#regions: -# - us-east-1 -#hostnames: -# - instance-id -#filters: -# tag:SSMTag: ssmwindows +# # aws_ec2.yml (Dynamic Inventory - Windows) +# plugin: aws_ec2 +# regions: +# - us-east-1 +# hostnames: +# - instance-id +# # This will return the Instances with the tag "SSMTag" set to "ssmwindows" +# filters: +# tag:SSMTag: ssmwindows # ----------------------- - name: Create a dir. hosts: all @@ -219,10 +251,13 @@ EXAMPLES = r''' win_file: path: C:\Temp\SSM_Testing5 state: directory + +--- + # Execution: ansible-playbook win_file.yaml -i aws_ec2.yml # The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection. 
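The notes above describe how the plugin moves files: the controller signs an S3 URL and the target fetches or uploads it with C(curl), so the instance itself needs no IAM credentials. A minimal sketch of the controller-side half in Python, assuming boto3 credentials from the usual credential chain; the bucket name and object key below are placeholders, and this simplifies the plugin's internal URL helper:

import boto3

# Controller side: presign a GET for a payload previously uploaded to S3.
# Bucket and key are placeholders; per the notes above, the plugin prefixes
# keys with the EC2 host ID.
s3 = boto3.client("s3")
url = s3.generate_presigned_url(
    "get_object",
    Params={"Bucket": "my-ssm-transfer-bucket", "Key": "i-0123456789abcdef0/payload.py"},
    ExpiresIn=3600,  # one-hour expiry, matching the plugin's choice
    HttpMethod="GET",
)

# Target side: the plugin sends a plain curl command over the SSM session,
# so the instance downloads the file without needing S3 credentials.
print(f"curl -o /tmp/payload.py '{url}'")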
-# Install a Nginx Package on Linux Instance; with specific SSE for file transfer +# Install a Nginx Package on Linux Instance; with specific SSE CMK used for the file transfer - name: Install a Nginx Package vars: ansible_connection: aws_ssm @@ -236,7 +271,7 @@ EXAMPLES = r''' name: nginx state: present -# Install a Nginx Package on Linux Instance; with dedicated SSM document +# Install a Nginx Package on Linux Instance; using the specified SSM document - name: Install a Nginx Package vars: ansible_connection: aws_ssm @@ -248,7 +283,7 @@ EXAMPLES = r''' yum: name: nginx state: present -''' +""" import os import getpass @@ -292,9 +327,10 @@ def _ssm_retry(func): * remaining_tries is <2 * retries limit reached """ + @wraps(func) def wrapped(self, *args, **kwargs): - remaining_tries = int(self.get_option('reconnection_retries')) + 1 + remaining_tries = int(self.get_option("reconnection_retries")) + 1 cmd_summary = f"{args[0]}..." for attempt in range(remaining_tries): try: @@ -305,7 +341,7 @@ def _ssm_retry(func): except (AnsibleConnectionFailure, Exception) as e: if attempt == remaining_tries - 1: raise - pause = 2 ** attempt - 1 + pause = 2**attempt - 1 pause = min(pause, 30) if isinstance(e, AnsibleConnectionFailure): @@ -325,28 +361,32 @@ def _ssm_retry(func): continue return return_tuple + return wrapped def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): - yield lst[i:i + n] + yield lst[i:i + n] # fmt: skip class Connection(ConnectionBase): - ''' AWS SSM based connections ''' + """AWS SSM based connections""" + + transport = "community.aws.aws_ssm" + default_user = "" - transport = 'community.aws.aws_ssm' allow_executable = False allow_extras = True has_pipelining = False is_windows = False + _client = None _s3_client = None _session = None _stdout = None - _session_id = '' + _session_id = "" _timeout = False MARK_LENGTH = 26 @@ -377,18 +417,20 @@ class Connection(ConnectionBase): (new AWS regions and new buckets in a region other than the one we're running in) """ - region_name = self.get_option('region') or 'us-east-1' - profile_name = self.get_option('profile') or '' + region_name = self.get_option("region") or "us-east-1" + profile_name = self.get_option("profile") or "" self._vvvv("_get_bucket_endpoint: S3 (global)") tmp_s3_client = self._get_boto_client( - 's3', region_name=region_name, profile_name=profile_name, + "s3", + region_name=region_name, + profile_name=profile_name, ) # Fetch the location of the bucket so we can open a client against the 'right' endpoint # This /should/ always work bucket_location = tmp_s3_client.get_bucket_location( - Bucket=(self.get_option('bucket_name')), + Bucket=(self.get_option("bucket_name")), ) - bucket_region = bucket_location['LocationConstraint'] + bucket_region = bucket_location["LocationConstraint"] if self.get_option("bucket_endpoint_url"): return self.get_option("bucket_endpoint_url"), bucket_region @@ -396,28 +438,35 @@ class Connection(ConnectionBase): # Create another client for the region the bucket lives in, so we can nab the endpoint URL self._vvvv(f"_get_bucket_endpoint: S3 (bucket region) - {bucket_region}") s3_bucket_client = self._get_boto_client( - 's3', region_name=bucket_region, profile_name=profile_name, + "s3", + region_name=bucket_region, + profile_name=profile_name, ) return s3_bucket_client.meta.endpoint_url, s3_bucket_client.meta.region_name def _init_clients(self): self._vvvv("INITIALIZE BOTO3 CLIENTS") - profile_name = self.get_option('profile') or '' - region_name = 
self.get_option('region') + profile_name = self.get_option("profile") or "" + region_name = self.get_option("region") # The SSM Boto client, currently used to initiate and manage the session # Note: does not handle the actual SSM session traffic self._vvvv("SETUP BOTO3 CLIENTS: SSM") ssm_client = self._get_boto_client( - 'ssm', region_name=region_name, profile_name=profile_name, + "ssm", + region_name=region_name, + profile_name=profile_name, ) self._client = ssm_client s3_endpoint_url, s3_region_name = self._get_bucket_endpoint() self._vvvv(f"SETUP BOTO3 CLIENTS: S3 {s3_endpoint_url}") s3_bucket_client = self._get_boto_client( - 's3', region_name=s3_region_name, endpoint_url=s3_endpoint_url, profile_name=profile_name, + "s3", + region_name=s3_region_name, + endpoint_url=s3_endpoint_url, + profile_name=profile_name, ) self._s3_client = s3_bucket_client @@ -430,21 +479,21 @@ class Connection(ConnectionBase): self.host = self._play_context.remote_addr - if getattr(self._shell, "SHELL_FAMILY", '') == 'powershell': + if getattr(self._shell, "SHELL_FAMILY", "") == "powershell": self.delegate = None self.has_native_async = True self.always_pipeline_modules = True - self.module_implementation_preferences = ('.ps1', '.exe', '') + self.module_implementation_preferences = (".ps1", ".exe", "") self.protocol = None self.shell_id = None - self._shell_type = 'powershell' + self._shell_type = "powershell" self.is_windows = True def __del__(self): self.close() def _connect(self): - ''' connect to the host via ssm ''' + """connect to the host via ssm""" self._play_context.remote_user = getpass.getuser() @@ -453,36 +502,37 @@ class Connection(ConnectionBase): return self def reset(self): - ''' start a fresh ssm session ''' - self._vvvv('reset called on ssm connection') + """start a fresh ssm session""" + self._vvvv("reset called on ssm connection") + self.close() return self.start_session() def start_session(self): - ''' start ssm session ''' + """start ssm session""" - if self.get_option('instance_id') is None: + if self.get_option("instance_id") is None: self.instance_id = self.host else: - self.instance_id = self.get_option('instance_id') + self.instance_id = self.get_option("instance_id") self._vvv(f"ESTABLISH SSM CONNECTION TO: {self.instance_id}") - executable = self.get_option('plugin') - if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')): + executable = self.get_option("plugin") + if not os.path.exists(to_bytes(executable, errors="surrogate_or_strict")): raise AnsibleError(f"failed to find the executable specified {executable}.") self._init_clients() self._vvvv(f"START SSM SESSION: {self.instance_id}") start_session_args = dict(Target=self.instance_id, Parameters={}) - document_name = self.get_option('ssm_document') + document_name = self.get_option("ssm_document") if document_name is not None: - start_session_args['DocumentName'] = document_name + start_session_args["DocumentName"] = document_name response = self._client.start_session(**start_session_args) - self._session_id = response['SessionId'] + self._session_id = response["SessionId"] - region_name = self.get_option('region') - profile_name = self.get_option('profile') or '' + region_name = self.get_option("region") + profile_name = self.get_option("profile") or "" cmd = [ executable, json.dumps(response), @@ -506,7 +556,7 @@ class Connection(ConnectionBase): ) os.close(stdout_w) - self._stdout = os.fdopen(stdout_r, 'rb', 0) + self._stdout = os.fdopen(stdout_r, "rb", 0) self._session = session self._poll_stdout = 
select.poll() self._poll_stdout.register(self._stdout, select.POLLIN) @@ -520,7 +570,7 @@ class Connection(ConnectionBase): @_ssm_retry def exec_command(self, cmd, in_data=None, sudoable=True): - ''' run a command on the ssm host ''' + """run a command on the ssm host""" super().exec_command(cmd, in_data=in_data, sudoable=sudoable) @@ -541,20 +591,19 @@ class Connection(ConnectionBase): self._flush_stderr(session) for chunk in chunks(cmd, 1024): - session.stdin.write(to_bytes(chunk, errors='surrogate_or_strict')) + session.stdin.write(to_bytes(chunk, errors="surrogate_or_strict")) # Read stdout between the markers - stdout = '' - win_line = '' + stdout = "" + win_line = "" begin = False - stop_time = int(round(time.time())) + self.get_option('ssm_timeout') + stop_time = int(round(time.time())) + self.get_option("ssm_timeout") while session.poll() is None: remaining = stop_time - int(round(time.time())) if remaining < 1: self._timeout = True self._vvvv(f"EXEC timeout stdout: \n{to_text(stdout)}") - raise AnsibleConnectionFailure( - f"SSM exec_command timeout on host: {self.instance_id}") + raise AnsibleConnectionFailure(f"SSM exec_command timeout on host: {self.instance_id}") if self._poll_stdout.poll(1000): line = self._filter_ansi(self._stdout.readline()) self._vvvv(f"EXEC stdout line: \n{to_text(line)}") @@ -569,7 +618,7 @@ class Connection(ConnectionBase): if mark_start in line: begin = True if not line.startswith(mark_start): - stdout = '' + stdout = "" continue if begin: if mark_end in line: @@ -584,7 +633,7 @@ class Connection(ConnectionBase): return (returncode, stdout, stderr) def _prepare_terminal(self): - ''' perform any one-time terminal settings ''' + """perform any one-time terminal settings""" # No windows setup for now if self.is_windows: return @@ -599,16 +648,12 @@ class Connection(ConnectionBase): disable_echo_cmd = to_bytes("stty -echo\n", errors="surrogate_or_strict") disable_prompt_complete = None - end_mark = "".join( - [random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)] - ) + end_mark = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)]) disable_prompt_cmd = to_bytes( - "PS1='' ; printf '\\n%s\\n' '" + end_mark + "'\n", + "PS1='' ; bind 'set enable-bracketed-paste off'; printf '\\n%s\\n' '" + end_mark + "'\n", errors="surrogate_or_strict", ) - disable_prompt_reply = re.compile( - r"\r\r\n" + re.escape(end_mark) + r"\r\r\n", re.MULTILINE - ) + disable_prompt_reply = re.compile(r"\r\r\n" + re.escape(end_mark) + r"\r\r\n", re.MULTILINE) stdout = "" # Custom command execution for when we're waiting for startup @@ -618,9 +663,7 @@ class Connection(ConnectionBase): if remaining < 1: self._timeout = True self._vvvv(f"PRE timeout stdout: \n{to_bytes(stdout)}") - raise AnsibleConnectionFailure( - f"SSM start_session timeout on host: {self.instance_id}" - ) + raise AnsibleConnectionFailure(f"SSM start_session timeout on host: {self.instance_id}") if self._poll_stdout.poll(1000): stdout += to_text(self._stdout.read(1024)) self._vvvv(f"PRE stdout line: \n{to_bytes(stdout)}") @@ -654,36 +697,32 @@ class Connection(ConnectionBase): if disable_prompt_complete is False: match = disable_prompt_reply.search(stdout) if match: - stdout = stdout[match.end():] + stdout = stdout[match.end():] # fmt: skip disable_prompt_complete = True if not disable_prompt_complete: - raise AnsibleConnectionFailure( - f"SSM process closed during _prepare_terminal on host: {self.instance_id}" - ) + raise AnsibleConnectionFailure(f"SSM process closed during 
_prepare_terminal on host: {self.instance_id}") self._vvvv("PRE Terminal configured") def _wrap_command(self, cmd, sudoable, mark_start, mark_end): - ''' wrap command so stdout and status can be extracted ''' + """wrap command so stdout and status can be extracted""" if self.is_windows: if not cmd.startswith(" ".join(_common_args) + " -EncodedCommand"): cmd = self._shell._encode_script(cmd, preserve_rc=True) cmd = cmd + "; echo " + mark_start + "\necho " + mark_end + "\n" else: - if sudoable: - cmd = "sudo " + cmd cmd = ( f"printf '%s\\n' '{mark_start}';\n" f"echo | {cmd};\n" f"printf '\\n%s\\n%s\\n' \"$?\" '{mark_end}';\n" - ) + ) # fmt: skip self._vvvv(f"_wrap_command: \n'{to_text(cmd)}'") return cmd def _post_process(self, stdout, mark_begin): - ''' extract command status and strip unwanted lines ''' + """extract command status and strip unwanted lines""" if not self.is_windows: # Get command return code @@ -691,50 +730,50 @@ class Connection(ConnectionBase): # Throw away final lines for _x in range(0, 3): - stdout = stdout[:stdout.rfind('\n')] + stdout = stdout[:stdout.rfind('\n')] # fmt: skip return (returncode, stdout) # Windows is a little more complex # Value of $LASTEXITCODE will be the line after the mark - trailer = stdout[stdout.rfind(mark_begin):] + trailer = stdout[stdout.rfind(mark_begin):] # fmt: skip last_exit_code = trailer.splitlines()[1] if last_exit_code.isdigit(): returncode = int(last_exit_code) else: returncode = -1 # output to keep will be before the mark - stdout = stdout[:stdout.rfind(mark_begin)] + stdout = stdout[:stdout.rfind(mark_begin)] # fmt: skip # If it looks like JSON remove any newlines - if stdout.startswith('{'): - stdout = stdout.replace('\n', '') + if stdout.startswith("{"): + stdout = stdout.replace("\n", "") return (returncode, stdout) def _filter_ansi(self, line): - ''' remove any ANSI terminal control codes ''' + """remove any ANSI terminal control codes""" line = to_text(line) if self.is_windows: - osc_filter = re.compile(r'\x1b\][^\x07]*\x07') - line = osc_filter.sub('', line) - ansi_filter = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]') - line = ansi_filter.sub('', line) + osc_filter = re.compile(r"\x1b\][^\x07]*\x07") + line = osc_filter.sub("", line) + ansi_filter = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]") + line = ansi_filter.sub("", line) # Replace or strip sequence (at terminal width) - line = line.replace('\r\r\n', '\n') + line = line.replace("\r\r\n", "\n") if len(line) == 201: line = line[:-1] return line def _flush_stderr(self, session_process): - ''' read and return stderr with minimal blocking ''' + """read and return stderr with minimal blocking""" poll_stderr = select.poll() poll_stderr.register(session_process.stderr, select.POLLIN) - stderr = '' + stderr = "" while session_process.poll() is None: if not poll_stderr.poll(1): @@ -746,20 +785,20 @@ class Connection(ConnectionBase): return stderr def _get_url(self, client_method, bucket_name, out_path, http_method, extra_args=None): - ''' Generate URL for get_object / put_object ''' + """Generate URL for get_object / put_object""" client = self._s3_client - params = {'Bucket': bucket_name, 'Key': out_path} + params = {"Bucket": bucket_name, "Key": out_path} if extra_args is not None: params.update(extra_args) return client.generate_presigned_url(client_method, Params=params, ExpiresIn=3600, HttpMethod=http_method) def _get_boto_client(self, service, region_name=None, profile_name=None, endpoint_url=None): - ''' Gets a boto3 client based on the STS token ''' + """Gets a boto3 
client based on the STS token""" - aws_access_key_id = self.get_option('access_key_id') - aws_secret_access_key = self.get_option('secret_access_key') - aws_session_token = self.get_option('session_token') + aws_access_key_id = self.get_option("access_key_id") + aws_secret_access_key = self.get_option("secret_access_key") + aws_session_token = self.get_option("session_token") session_args = dict( aws_access_key_id=aws_access_key_id, @@ -768,7 +807,7 @@ class Connection(ConnectionBase): region_name=region_name, ) if profile_name: - session_args['profile_name'] = profile_name + session_args["profile_name"] = profile_name session = boto3.session.Session(**session_args) client = session.client( @@ -776,8 +815,8 @@ class Connection(ConnectionBase): endpoint_url=endpoint_url, config=Config( signature_version="s3v4", - s3={'addressing_style': self.get_option('s3_addressing_style')} - ) + s3={"addressing_style": self.get_option("s3_addressing_style")}, + ), ) return client @@ -787,21 +826,21 @@ class Connection(ConnectionBase): def _generate_encryption_settings(self): put_args = {} put_headers = {} - if not self.get_option('bucket_sse_mode'): + if not self.get_option("bucket_sse_mode"): return put_args, put_headers - put_args['ServerSideEncryption'] = self.get_option('bucket_sse_mode') - put_headers['x-amz-server-side-encryption'] = self.get_option('bucket_sse_mode') - if self.get_option('bucket_sse_mode') == 'aws:kms' and self.get_option('bucket_sse_kms_key_id'): - put_args['SSEKMSKeyId'] = self.get_option('bucket_sse_kms_key_id') - put_headers['x-amz-server-side-encryption-aws-kms-key-id'] = self.get_option('bucket_sse_kms_key_id') + put_args["ServerSideEncryption"] = self.get_option("bucket_sse_mode") + put_headers["x-amz-server-side-encryption"] = self.get_option("bucket_sse_mode") + if self.get_option("bucket_sse_mode") == "aws:kms" and self.get_option("bucket_sse_kms_key_id"): + put_args["SSEKMSKeyId"] = self.get_option("bucket_sse_kms_key_id") + put_headers["x-amz-server-side-encryption-aws-kms-key-id"] = self.get_option("bucket_sse_kms_key_id") return put_args, put_headers def _generate_commands(self, bucket_name, s3_path, in_path, out_path): put_args, put_headers = self._generate_encryption_settings() - put_url = self._get_url('put_object', bucket_name, s3_path, 'PUT', extra_args=put_args) - get_url = self._get_url('get_object', bucket_name, s3_path, 'GET') + put_url = self._get_url("put_object", bucket_name, s3_path, "PUT", extra_args=put_args) + get_url = self._get_url("get_object", bucket_name, s3_path, "GET") if self.is_windows: put_command_headers = "; ".join([f"'{h}' = '{v}'" for h, v in put_headers.items()]) @@ -813,14 +852,14 @@ class Connection(ConnectionBase): f"-Uri '{put_url}' " f"-UseBasicParsing" ), - ] + ] # fmt: skip get_commands = [ ( "Invoke-WebRequest " f"'{get_url}' " f"-OutFile '{out_path}'" ), - ] + ] # fmt: skip else: put_command_headers = " ".join([f"-H '{h}: {v}'" for h, v in put_headers.items()]) put_commands = [ @@ -830,7 +869,7 @@ class Connection(ConnectionBase): f"--upload-file '{in_path}' " f"'{put_url}'" ), - ] + ] # fmt: skip get_commands = [ ( "curl " @@ -846,20 +885,18 @@ class Connection(ConnectionBase): "touch " f"'{out_path}'" ) - ] + ] # fmt: skip return get_commands, put_commands, put_args def _exec_transport_commands(self, in_path, out_path, commands): - stdout_combined, stderr_combined = '', '' + stdout_combined, stderr_combined = "", "" for command in commands: (returncode, stdout, stderr) = self.exec_command(command, in_data=None, 
sudoable=False) # Check the return code if returncode != 0: - raise AnsibleError( - f"failed to transfer file to {in_path} {out_path}:\n" - f"{stdout}\n{stderr}") + raise AnsibleError(f"failed to transfer file to {in_path} {out_path}:\n{stdout}\n{stderr}") stdout_combined += stdout stderr_combined += stderr @@ -868,24 +905,27 @@ class Connection(ConnectionBase): @_ssm_retry def _file_transport_command(self, in_path, out_path, ssm_action): - ''' transfer a file to/from host using an intermediate S3 bucket ''' + """transfer a file to/from host using an intermediate S3 bucket""" bucket_name = self.get_option("bucket_name") s3_path = self._escape_path(f"{self.instance_id}/{out_path}") get_commands, put_commands, put_args = self._generate_commands( - bucket_name, s3_path, in_path, out_path, + bucket_name, + s3_path, + in_path, + out_path, ) client = self._s3_client try: - if ssm_action == 'get': + if ssm_action == "get": (returncode, stdout, stderr) = self._exec_transport_commands(in_path, out_path, put_commands) - with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as data: + with open(to_bytes(out_path, errors="surrogate_or_strict"), "wb") as data: client.download_fileobj(bucket_name, s3_path, data) else: - with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as data: + with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as data: client.upload_fileobj(data, bucket_name, s3_path, ExtraArgs=put_args) (returncode, stdout, stderr) = self._exec_transport_commands(in_path, out_path, get_commands) return (returncode, stdout, stderr) @@ -894,28 +934,27 @@ class Connection(ConnectionBase): client.delete_object(Bucket=bucket_name, Key=s3_path) def put_file(self, in_path, out_path): - ''' transfer a file from local to remote ''' + """transfer a file from local to remote""" super().put_file(in_path, out_path) self._vvv(f"PUT {in_path} TO {out_path}") - if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): + if not os.path.exists(to_bytes(in_path, errors="surrogate_or_strict")): raise AnsibleFileNotFound(f"file or module does not exist: {in_path}") - return self._file_transport_command(in_path, out_path, 'put') + return self._file_transport_command(in_path, out_path, "put") def fetch_file(self, in_path, out_path): - ''' fetch a file from remote to local ''' + """fetch a file from remote to local""" super().fetch_file(in_path, out_path) self._vvv(f"FETCH {in_path} TO {out_path}") - return self._file_transport_command(in_path, out_path, 'get') + return self._file_transport_command(in_path, out_path, "get") def close(self): - ''' terminate the connection ''' + """terminate the connection""" if self._session_id: - self._vvv(f"CLOSING SSM CONNECTION TO: {self.instance_id}") if self._timeout: self._session.terminate() @@ -925,4 +964,4 @@ class Connection(ConnectionBase): self._vvvv(f"TERMINATE SSM SESSION: {self._session_id}") self._client.terminate_session(SessionId=self._session_id) - self._session_id = '' + self._session_id = "" diff --git a/ansible_collections/community/aws/plugins/inventory/aws_mq.py b/ansible_collections/community/aws/plugins/inventory/aws_mq.py new file mode 100644 index 000000000..3ca1a6a97 --- /dev/null +++ b/ansible_collections/community/aws/plugins/inventory/aws_mq.py @@ -0,0 +1,297 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2023 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +name: aws_mq +version_added: 6.1.0 +short_description: MQ 
broker inventory source +description: + - Get brokers from Amazon Web Services MQ. + - Uses a YAML configuration file that ends with aws_mq.(yml|yaml). +options: + regions: + description: + - A list of regions in which to describe MQ brokers. Available regions are listed here + U(https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/) + type: list + elements: str + default: [] + strict_permissions: + description: By default if an AccessDenied exception is encountered this plugin will fail. You can set I(strict_permissions) to + C(False) in the inventory config file, which will allow the restricted resources to be skipped gracefully. + type: bool + default: True + statuses: + description: + - A list of desired states for brokers to be added to inventory. Set to C(['all']) as a shorthand to find everything. + Possible values are listed here U(https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-statuses.html) + type: list + elements: str + default: + - RUNNING + - CREATION_IN_PROGRESS + hostvars_prefix: + description: + - The prefix for host variable names coming from AWS. + type: str + hostvars_suffix: + description: + - The suffix for host variable names coming from AWS. + type: str +extends_documentation_fragment: + - inventory_cache + - constructed + - amazon.aws.boto3 + - amazon.aws.common.plugins + - amazon.aws.region.plugins + - amazon.aws.assume_role.plugins +author: + - Ali AlKhalidi (@doteast) +""" + +EXAMPLES = r""" +--- +# Minimal example using AWS credentials from environment vars or instance role credentials +# Get all brokers in the ca-central-1 region +plugin: community.aws.aws_mq +regions: + - ca-central-1 + +--- + +# Example with multiple regions, ignoring permission errors, and only brokers in state RUNNING +plugin: community.aws.aws_mq +regions: + - us-east-1 + - us-east-2 +strict_permissions: false +statuses: + - RUNNING + +--- + +# Example grouping by engine, with a custom hostvars prefix and suffix, and a variable composed from tags +plugin: community.aws.aws_mq +regions: + - ca-central-1 +keyed_groups: + - key: engine_type + prefix: mq +compose: + app: 'tags.Applications|split(",")' +hostvars_prefix: aws_ +hostvars_suffix: _mq +""" + +try: + import botocore +except ImportError: + pass # will be captured by imported HAS_BOTO3 + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.plugin_utils.inventory import AWSInventoryBase + +broker_attr = [ + "MaintenanceWindowStartTime", + "AutoMinorVersionUpgrade", + "AuthenticationStrategy", + "PubliclyAccessible", + "EncryptionOptions", + "HostInstanceType", + "BrokerInstances", + "SecurityGroups", + "DeploymentMode", + "EngineVersion", + "StorageType", + "BrokerState", + "EngineType", + "SubnetIds", + "BrokerArn", + "BrokerId", + "Created", + "Logs", +] + +inventory_group = "aws_mq" + + +def _find_hosts_matching_statuses(hosts, statuses): + if not statuses: + statuses = ["RUNNING", "CREATION_IN_PROGRESS"] + if "all" in statuses: + return hosts + valid_hosts = [] + for host in hosts: + if host.get("BrokerState") in statuses: + valid_hosts.append(host) + return valid_hosts + + +def _get_mq_hostname(host): + if host.get("BrokerName"): + return 
host["BrokerName"] + + +def _get_broker_host_tags(detail): + tags = [] + if "Tags" in detail: + for key, value in detail["Tags"].items(): + tags.append({"Key": key, "Value": value}) + return tags + + +def _add_details_to_hosts(connection, hosts, strict): + for host in hosts: + detail = None + resource_id = host["BrokerId"] + try: + detail = connection.describe_broker(BrokerId=resource_id) + except is_boto3_error_code("AccessDenied") as e: + if not strict: + pass + else: + raise AnsibleError(f"Failed to query MQ: {to_native(e)}") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleError(f"Failed to query MQ: {to_native(e)}") + + if detail: + # special handling of tags + host["Tags"] = _get_broker_host_tags(detail) + + # collect rest of attributes + for attr in broker_attr: + if attr in detail: + host[attr] = detail[attr] + + +class InventoryModule(AWSInventoryBase): + NAME = "community.aws.aws_mq" + INVENTORY_FILE_SUFFIXES = ("aws_mq.yml", "aws_mq.yaml") + + def __init__(self): + super(InventoryModule, self).__init__() + + def _get_broker_hosts(self, connection, strict): + def _boto3_paginate_wrapper(func, *args, **kwargs): + results = [] + try: + results = func(*args, **kwargs) + results = results["BrokerSummaries"] + _add_details_to_hosts(connection, results, strict) + except is_boto3_error_code("AccessDenied") as e: # pylint: disable=duplicate-except + if not strict: + results = [] + else: + raise AnsibleError(f"Failed to query MQ: {to_native(e)}") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleError(f"Failed to query MQ: {to_native(e)}") + return results + + return _boto3_paginate_wrapper + + def _get_all_hosts(self, regions, strict, statuses): + """ + :param regions: a list of regions in which to describe hosts + :param strict: a boolean determining whether to fail or ignore 403 error codes + :param statuses: a list of statuses that the returned hosts should match + :return A list of host dictionaries + """ + all_instances = [] + + for connection, _region in self.all_clients("mq"): + paginator = connection.get_paginator("list_brokers") + all_instances.extend(self._get_broker_hosts(connection, strict)(paginator.paginate().build_full_result)) + sorted_hosts = list(sorted(all_instances, key=lambda x: x["BrokerName"])) + return _find_hosts_matching_statuses(sorted_hosts, statuses) + + def _populate_from_cache(self, cache_data): + hostvars = cache_data.pop("_meta", {}).get("hostvars", {}) + for group in cache_data: + if group == "all": + continue + self.inventory.add_group(group) + hosts = cache_data[group].get("hosts", []) + for host in hosts: + self._populate_host_vars([host], hostvars.get(host, {}), group) + self.inventory.add_child("all", group) + + def _populate(self, hosts): + group = inventory_group + self.inventory.add_group(group) + if hosts: + self._add_hosts(hosts=hosts, group=group) + self.inventory.add_child("all", group) + + def _format_inventory(self, hosts): + results = {"_meta": {"hostvars": {}}} + group = inventory_group + results[group] = {"hosts": []} + for host in hosts: + hostname = _get_mq_hostname(host) + results[group]["hosts"].append(hostname) + h = self.inventory.get_host(hostname) + results["_meta"]["hostvars"][h.name] = h.vars + return results + + def _add_hosts(self, hosts, group): + """ + :param hosts: a list of hosts to add to the group + :param group: name of the group the 
host list belongs to + """ + for host in hosts: + hostname = _get_mq_hostname(host) + host = camel_dict_to_snake_dict(host, ignore_list=["Tags", "EngineType"]) + host["tags"] = boto3_tag_list_to_ansible_dict(host.get("tags", [])) + if host.get("engine_type"): + # align value with API spec of all upper + host["engine_type"] = host.get("engine_type", "").upper() + + self.inventory.add_host(hostname, group=group) + new_vars = dict() + hostvars_prefix = self.get_option("hostvars_prefix") + hostvars_suffix = self.get_option("hostvars_suffix") + for hostvar, hostval in host.items(): + if hostvars_prefix: + hostvar = hostvars_prefix + hostvar + if hostvars_suffix: + hostvar = hostvar + hostvars_suffix + new_vars[hostvar] = hostval + self.inventory.set_variable(hostname, hostvar, hostval) + host.update(new_vars) + + strict = self.get_option("strict") + self._set_composite_vars(self.get_option("compose"), host, hostname, strict=strict) + self._add_host_to_composed_groups(self.get_option("groups"), host, hostname, strict=strict) + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), host, hostname, strict=strict) + + def parse(self, inventory, loader, path, cache=True): + super().parse(inventory, loader, path, cache=cache) + + # get user specifications + regions = self.get_option("regions") + strict_permissions = self.get_option("strict_permissions") + statuses = self.get_option("statuses") + + result_was_cached, results = self.get_cached_result(path, cache) + if result_was_cached: + self._populate_from_cache(results) + return + + results = self._get_all_hosts(regions, strict_permissions, statuses) + self._populate(results) + + formatted_inventory = self._format_inventory(results) + self.update_cached_result(path, cache, formatted_inventory) diff --git a/ansible_collections/community/aws/plugins/module_utils/base.py b/ansible_collections/community/aws/plugins/module_utils/base.py index 1ce732d7a..86b846c63 100644 --- a/ansible_collections/community/aws/plugins/module_utils/base.py +++ b/ansible_collections/community/aws/plugins/module_utils/base.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -7,9 +9,6 @@ # sense for it to start life in community.aws. # -from __future__ import absolute_import, division, print_function -__metaclass__ = type - from copy import deepcopy from functools import wraps @@ -23,7 +22,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -class BaseWaiterFactory(): +class BaseWaiterFactory: """ A helper class used for creating additional waiters. 
Unlike the waiters available directly from botocore these waiters will @@ -40,6 +39,7 @@ class BaseWaiterFactory(): waiter = waiters.get_waiter('my_waiter_name') waiter.wait(**params) """ + module = None client = None @@ -114,9 +114,14 @@ class BaseWaiterFactory(): def _inject_ratelimit_retries(self, model): extra_retries = [ - 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable', - 'InternalFailure', 'InternalError', 'TooManyRequestsException', - 'Throttling'] + "RequestLimitExceeded", + "Unavailable", + "ServiceUnavailable", + "InternalFailure", + "InternalError", + "TooManyRequestsException", + "Throttling", + ] acceptors = [] for error in extra_retries: @@ -131,15 +136,15 @@ class BaseWaiterFactory(): def get_waiter(self, waiter_name): waiters = self._model.waiter_names if waiter_name not in waiters: - self.module.fail_json( - 'Unable to find waiter {0}. Available_waiters: {1}' - .format(waiter_name, waiters)) + self.module.fail_json(f"Unable to find waiter {waiter_name}. Available_waiters: {waiters}") return botocore.waiter.create_waiter_with_client( - waiter_name, self._model, self.client, + waiter_name, + self._model, + self.client, ) -class Boto3Mixin(): +class Boto3Mixin: @staticmethod def aws_error_handler(description): r""" @@ -177,11 +182,13 @@ class Boto3Mixin(): extra_ouput = _self._extra_error_output() try: return func(_self, *args, **kwargs) - except (botocore.exceptions.WaiterError) as e: - _self.module.fail_json_aws(e, msg='Failed waiting for {DESC}'.format(DESC=description), **extra_ouput) + except botocore.exceptions.WaiterError as e: + _self.module.fail_json_aws(e, msg=f"Failed waiting for {description}", **extra_ouput) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - _self.module.fail_json_aws(e, msg='Failed to {DESC}'.format(DESC=description), **extra_ouput) + _self.module.fail_json_aws(e, msg=f"Failed to {description}", **extra_ouput) + return handler + return wrapper def _normalize_boto3_resource(self, resource, add_tags=False): @@ -199,7 +206,7 @@ class Boto3Mixin(): if resource is None: return None - tags = resource.get('Tags', None) + tags = resource.get("Tags", None) if tags: tags = boto3_tag_list_to_ansible_dict(tags) elif add_tags or tags is not None: @@ -207,7 +214,7 @@ class Boto3Mixin(): normalized_resource = camel_dict_to_snake_dict(resource) if tags is not None: - normalized_resource['tags'] = tags + normalized_resource["tags"] = tags return normalized_resource def _extra_error_output(self): @@ -262,9 +269,9 @@ class BaseResourceManager(Boto3Mixin): params = dict() if self._wait_timeout: delay = min(5, self._wait_timeout) - max_attempts = (self._wait_timeout // delay) + max_attempts = self._wait_timeout // delay config = dict(Delay=delay, MaxAttempts=max_attempts) - params['WaiterConfig'] = config + params["WaiterConfig"] = config return params def _wait_for_deletion(self): @@ -347,8 +354,7 @@ class BaseResourceManager(Boto3Mixin): if immutable and self.original_resource: if description is None: description = key - self.module.fail_json(msg='{0} can not be updated after creation' - .format(description)) + self.module.fail_json(msg=f"{description} can not be updated after creation") self._resource_updates[key] = value self.changed = True return True diff --git a/ansible_collections/community/aws/plugins/module_utils/common.py b/ansible_collections/community/aws/plugins/module_utils/common.py new file mode 100644 index 000000000..0c4374729 --- /dev/null +++ 
b/ansible_collections/community/aws/plugins/module_utils/common.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +COMMUNITY_AWS_COLLECTION_NAME = "community.aws" +COMMUNITY_AWS_COLLECTION_VERSION = "7.1.0" diff --git a/ansible_collections/community/aws/plugins/module_utils/dynamodb.py b/ansible_collections/community/aws/plugins/module_utils/dynamodb.py new file mode 100644 index 000000000..d48029c1b --- /dev/null +++ b/ansible_collections/community/aws/plugins/module_utils/dynamodb.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory + + +class DynamodbWaiterFactory(BaseWaiterFactory): + def __init__(self, module): + # the AWSRetry wrapper doesn't support the wait functions (there's no + # public call we can cleanly wrap) + client = module.client("dynamodb") + super().__init__(module, client) + + @property + def _waiter_model_data(self): + data = super()._waiter_model_data + ddb_data = dict( + table_exists=dict( + operation="DescribeTable", + delay=20, + maxAttempts=25, + acceptors=[ + dict(expected="ACTIVE", matcher="path", state="success", argument="Table.TableStatus"), + dict(expected="ResourceNotFoundException", matcher="error", state="retry"), + ], + ), + table_not_exists=dict( + operation="DescribeTable", + delay=20, + maxAttempts=25, + acceptors=[ + dict(expected="ResourceNotFoundException", matcher="error", state="success"), + ], + ), + global_indexes_active=dict( + operation="DescribeTable", + delay=20, + maxAttempts=25, + acceptors=[ + dict(expected="ResourceNotFoundException", matcher="error", state="failure"), + # If there are no secondary indexes, simply return + dict( + expected=False, + matcher="path", + state="success", + argument="contains(keys(Table), `GlobalSecondaryIndexes`)", + ), + dict( + expected="ACTIVE", + matcher="pathAll", + state="success", + argument="Table.GlobalSecondaryIndexes[].IndexStatus", + ), + dict( + expected="CREATING", + matcher="pathAny", + state="retry", + argument="Table.GlobalSecondaryIndexes[].IndexStatus", + ), + dict( + expected="UPDATING", + matcher="pathAny", + state="retry", + argument="Table.GlobalSecondaryIndexes[].IndexStatus", + ), + dict( + expected="DELETING", + matcher="pathAny", + state="retry", + argument="Table.GlobalSecondaryIndexes[].IndexStatus", + ), + dict( + expected=True, + matcher="path", + state="success", + argument="length(Table.GlobalSecondaryIndexes) == `0`", + ), + ], + ), + ) + data.update(ddb_data) + return data + + +def _do_wait(module, waiter_name, action_description, wait_timeout, table_name): + delay = min(wait_timeout, 5) + max_attempts = wait_timeout // delay + + try: + waiter = DynamodbWaiterFactory(module).get_waiter(waiter_name) + waiter.wait( + WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, + TableName=table_name, + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg=f"Timeout while waiting for {action_description}") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Failed while waiting for {action_description}") + + +def 
wait_table_exists(module, wait_timeout, table_name): + _do_wait(module, "table_exists", "table creation", wait_timeout, table_name) + + +def wait_table_not_exists(module, wait_timeout, table_name): + _do_wait(module, "table_not_exists", "table deletion", wait_timeout, table_name) + + +def wait_indexes_active(module, wait_timeout, table_name): + _do_wait(module, "global_indexes_active", "secondary index updates", wait_timeout, table_name) diff --git a/ansible_collections/community/aws/plugins/module_utils/ec2.py b/ansible_collections/community/aws/plugins/module_utils/ec2.py index 5ae789857..59b617f20 100644 --- a/ansible_collections/community/aws/plugins/module_utils/ec2.py +++ b/ansible_collections/community/aws/plugins/module_utils/ec2.py @@ -1,18 +1,17 @@ +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - from copy import deepcopy -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list from ansible_collections.community.aws.plugins.module_utils.base import BaseResourceManager from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory @@ -23,7 +22,7 @@ class Ec2WaiterFactory(BaseWaiterFactory): def __init__(self, module): # the AWSRetry wrapper doesn't support the wait functions (there's no # public call we can cleanly wrap) - client = module.client('ec2') + client = module.client("ec2") super(Ec2WaiterFactory, self).__init__(module, client) @property @@ -33,30 +32,28 @@ class Ec2WaiterFactory(BaseWaiterFactory): class Ec2Boto3Mixin(Boto3Mixin): - @AWSRetry.jittered_backoff() def _paginated_describe_subnets(self, **params): - paginator = self.client.get_paginator('describe_subnets') + paginator = self.client.get_paginator("describe_subnets") return paginator.paginate(**params).build_full_result() - @Boto3Mixin.aws_error_handler('describe subnets') + @Boto3Mixin.aws_error_handler("describe subnets") def _describe_subnets(self, **params): try: result = self._paginated_describe_subnets(**params) - except is_boto3_error_code('SubnetID.NotFound'): + except is_boto3_error_code("SubnetID.NotFound"): return None - return result.get('Subnets', None) + return result.get("Subnets", None) class BaseEc2Manager(Ec2Boto3Mixin, BaseResourceManager): - resource_id = None TAG_RESOURCE_TYPE = None # This can be 
overridden by a subclass *if* 'Tags' isn't returned as a part of # the standard Resource description TAGS_ON_RESOURCE = True # If the resource supports using "TagSpecifications" on creation we can - TAGS_ON_CREATE = 'TagSpecifications' + TAGS_ON_CREATE = "TagSpecifications" def __init__(self, module, id=None): r""" @@ -79,27 +76,27 @@ class BaseEc2Manager(Ec2Boto3Mixin, BaseResourceManager): changed |= super(BaseEc2Manager, self)._flush_update() return changed - @Boto3Mixin.aws_error_handler('connect to AWS') - def _create_client(self, client_name='ec2'): + @Boto3Mixin.aws_error_handler("connect to AWS") + def _create_client(self, client_name="ec2"): client = self.module.client(client_name, retry_decorator=AWSRetry.jittered_backoff()) return client - @Boto3Mixin.aws_error_handler('set tags on resource') + @Boto3Mixin.aws_error_handler("set tags on resource") def _add_tags(self, **params): self.client.create_tags(aws_retry=True, **params) return True - @Boto3Mixin.aws_error_handler('unset tags on resource') + @Boto3Mixin.aws_error_handler("unset tags on resource") def _remove_tags(self, **params): self.client.delete_tags(aws_retry=True, **params) return True @AWSRetry.jittered_backoff() def _paginated_describe_tags(self, **params): - paginator = self.client.get_paginator('describe_tags') + paginator = self.client.get_paginator("describe_tags") return paginator.paginate(**params).build_full_result() - @Boto3Mixin.aws_error_handler('list tags on resource') + @Boto3Mixin.aws_error_handler("list tags on resource") def _describe_tags(self, id=None): if not id: id = self.resource_id @@ -112,7 +109,7 @@ class BaseEc2Manager(Ec2Boto3Mixin, BaseResourceManager): id = self.resource_id # If the Tags are available from the resource, then use them if self.TAGS_ON_RESOURCE: - tags = self._preupdate_resource.get('Tags', []) + tags = self._preupdate_resource.get("Tags", []) # Otherwise we'll have to look them up else: tags = self._describe_tags(id=id) @@ -120,8 +117,8 @@ class BaseEc2Manager(Ec2Boto3Mixin, BaseResourceManager): def _do_tagging(self): changed = False - tags_to_add = self._tagging_updates.get('add') - tags_to_remove = self._tagging_updates.get('remove') + tags_to_add = self._tagging_updates.get("add") + tags_to_remove = self._tagging_updates.get("remove") if tags_to_add: changed = True @@ -137,25 +134,22 @@ class BaseEc2Manager(Ec2Boto3Mixin, BaseResourceManager): return changed def _merge_resource_changes(self, filter_immutable=True, creation=False): - resource = super(BaseEc2Manager, self)._merge_resource_changes( - filter_immutable=filter_immutable, - creation=creation + filter_immutable=filter_immutable, creation=creation ) if creation: if not self.TAGS_ON_CREATE: - resource.pop('Tags', None) - elif self.TAGS_ON_CREATE == 'TagSpecifications': - tags = boto3_tag_list_to_ansible_dict(resource.pop('Tags', [])) + resource.pop("Tags", None) + elif self.TAGS_ON_CREATE == "TagSpecifications": + tags = boto3_tag_list_to_ansible_dict(resource.pop("Tags", [])) tag_specs = boto3_tag_specifications(tags, types=[self.TAG_RESOURCE_TYPE]) if tag_specs: - resource['TagSpecifications'] = tag_specs + resource["TagSpecifications"] = tag_specs return resource def set_tags(self, tags, purge_tags): - if tags is None: return False changed = False @@ -174,16 +168,16 @@ class BaseEc2Manager(Ec2Boto3Mixin, BaseResourceManager): tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags) if tags_to_add: - self._tagging_updates['add'] = tags_to_add + self._tagging_updates["add"] = tags_to_add 
changed = True if tags_to_remove: - self._tagging_updates['remove'] = tags_to_remove + self._tagging_updates["remove"] = tags_to_remove changed = True if changed: # Tags are stored as a list, but treated like a dict; the # simplistic '==' in _set_resource_value doesn't do the comparison # properly - return self._set_resource_value('Tags', ansible_dict_to_boto3_tag_list(desired_tags)) + return self._set_resource_value("Tags", ansible_dict_to_boto3_tag_list(desired_tags)) return False diff --git a/ansible_collections/community/aws/plugins/module_utils/etag.py b/ansible_collections/community/aws/plugins/module_utils/etag.py index a8cab5082..95c5ac94f 100644 --- a/ansible_collections/community/aws/plugins/module_utils/etag.py +++ b/ansible_collections/community/aws/plugins/module_utils/etag.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # source: https://github.com/tlastowka/calculate_multipart_etag/blob/master/calculate_multipart_etag.py # # calculate_multipart_etag Copyright (C) 2015 @@ -22,6 +24,7 @@ import hashlib try: from boto3.s3.transfer import TransferConfig + DEFAULT_CHUNK_SIZE = TransferConfig().multipart_chunksize except ImportError: DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024 @@ -40,23 +43,22 @@ def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE): md5s = [] - with open(source_path, 'rb') as fp: + with open(source_path, "rb") as fp: while True: - data = fp.read(chunk_size) if not data: break - md5 = hashlib.new('md5', usedforsecurity=False) + md5 = hashlib.new("md5", usedforsecurity=False) md5.update(data) md5s.append(md5) if len(md5s) == 1: - new_etag = '"{0}"'.format(md5s[0].hexdigest()) + new_etag = f'"{md5s[0].hexdigest()}"' else: # > 1 digests = b"".join(m.digest() for m in md5s) new_md5 = hashlib.md5(digests) - new_etag = '"{0}-{1}"'.format(new_md5.hexdigest(), len(md5s)) + new_etag = f'"{new_md5.hexdigest()}-{len(md5s)}"' return new_etag diff --git a/ansible_collections/community/aws/plugins/module_utils/modules.py b/ansible_collections/community/aws/plugins/module_utils/modules.py new file mode 100644 index 000000000..2d484aa1a --- /dev/null +++ b/ansible_collections/community/aws/plugins/module_utils/modules.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible_collections.amazon.aws.plugins.module_utils.common import set_collection_info +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + +from ansible_collections.community.aws.plugins.module_utils.common import COMMUNITY_AWS_COLLECTION_NAME +from ansible_collections.community.aws.plugins.module_utils.common import COMMUNITY_AWS_COLLECTION_VERSION + + +class AnsibleCommunityAWSModule(AnsibleAWSModule): + def __init__(self, **kwargs): + super(AnsibleCommunityAWSModule, self).__init__(**kwargs) + set_collection_info( + collection_name=COMMUNITY_AWS_COLLECTION_NAME, + collection_version=COMMUNITY_AWS_COLLECTION_VERSION, + ) diff --git a/ansible_collections/community/aws/plugins/module_utils/networkfirewall.py b/ansible_collections/community/aws/plugins/module_utils/networkfirewall.py index 920c9f092..19a372514 100644 --- a/ansible_collections/community/aws/plugins/module_utils/networkfirewall.py +++ b/ansible_collections/community/aws/plugins/module_utils/networkfirewall.py @@ -1,27 +1,25 @@ +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 
-from __future__ import absolute_import, division, print_function -__metaclass__ = type - -from copy import deepcopy import time +from copy import deepcopy from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.six import string_types from ansible_collections.amazon.aws.plugins.module_utils.arn import parse_aws_arn -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags -from ansible_collections.community.aws.plugins.module_utils.base import Boto3Mixin from ansible_collections.community.aws.plugins.module_utils.base import BaseResourceManager from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory - +from ansible_collections.community.aws.plugins.module_utils.base import Boto3Mixin from ansible_collections.community.aws.plugins.module_utils.ec2 import BaseEc2Manager @@ -63,7 +61,7 @@ class NetworkFirewallWaiterFactory(BaseWaiterFactory): def __init__(self, module): # the AWSRetry wrapper doesn't support the wait functions (there's no # public call we can cleanly wrap) - client = module.client('network-firewall') + client = module.client("network-firewall") super(NetworkFirewallWaiterFactory, self).__init__(module, client) @property @@ -71,63 +69,104 @@ class NetworkFirewallWaiterFactory(BaseWaiterFactory): data = super(NetworkFirewallWaiterFactory, self)._waiter_model_data nw_data = dict( rule_group_active=dict( - operation='DescribeRuleGroup', - delay=5, maxAttempts=120, + operation="DescribeRuleGroup", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='failure', matcher='path', expected='DELETING', argument='RuleGroupResponse.RuleGroupStatus'), - dict(state='success', matcher='path', expected='ACTIVE', argument='RuleGroupResponse.RuleGroupStatus'), - ] + dict( + state="failure", + matcher="path", + expected="DELETING", + argument="RuleGroupResponse.RuleGroupStatus", + ), + dict( + state="success", matcher="path", expected="ACTIVE", argument="RuleGroupResponse.RuleGroupStatus" + ), + ], ), rule_group_deleted=dict( - operation='DescribeRuleGroup', - delay=5, maxAttempts=120, + operation="DescribeRuleGroup", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='retry', matcher='path', expected='DELETING', argument='RuleGroupResponse.RuleGroupStatus'), - dict(state='success', matcher='error', expected='ResourceNotFoundException'), - ] + dict( + state="retry", matcher="path", expected="DELETING", argument="RuleGroupResponse.RuleGroupStatus" + ), + dict(state="success", matcher="error", expected="ResourceNotFoundException"), + ], ), policy_active=dict( - operation='DescribeFirewallPolicy', - delay=5, maxAttempts=120, + operation="DescribeFirewallPolicy", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='failure', matcher='path', expected='DELETING', 
argument='FirewallPolicyResponse.FirewallPolicyStatus'), - dict(state='success', matcher='path', expected='ACTIVE', argument='FirewallPolicyResponse.FirewallPolicyStatus'), - ] + dict( + state="failure", + matcher="path", + expected="DELETING", + argument="FirewallPolicyResponse.FirewallPolicyStatus", + ), + dict( + state="success", + matcher="path", + expected="ACTIVE", + argument="FirewallPolicyResponse.FirewallPolicyStatus", + ), + ], ), policy_deleted=dict( - operation='DescribeFirewallPolicy', - delay=5, maxAttempts=120, + operation="DescribeFirewallPolicy", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='retry', matcher='path', expected='DELETING', argument='FirewallPolicyResponse.FirewallPolicyStatus'), - dict(state='success', matcher='error', expected='ResourceNotFoundException'), - ] + dict( + state="retry", + matcher="path", + expected="DELETING", + argument="FirewallPolicyResponse.FirewallPolicyStatus", + ), + dict(state="success", matcher="error", expected="ResourceNotFoundException"), + ], ), firewall_active=dict( - operation='DescribeFirewall', - delay=5, maxAttempts=120, + operation="DescribeFirewall", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='failure', matcher='path', expected='DELETING', argument='FirewallStatus.Status'), - dict(state='retry', matcher='path', expected='PROVISIONING', argument='FirewallStatus.Status'), - dict(state='success', matcher='path', expected='READY', argument='FirewallStatus.Status'), - ] + dict(state="failure", matcher="path", expected="DELETING", argument="FirewallStatus.Status"), + dict(state="retry", matcher="path", expected="PROVISIONING", argument="FirewallStatus.Status"), + dict(state="success", matcher="path", expected="READY", argument="FirewallStatus.Status"), + ], ), firewall_updated=dict( - operation='DescribeFirewall', - delay=5, maxAttempts=240, + operation="DescribeFirewall", + delay=5, + maxAttempts=240, acceptors=[ - dict(state='failure', matcher='path', expected='DELETING', argument='FirewallStatus.Status'), - dict(state='retry', matcher='path', expected='PROVISIONING', argument='FirewallStatus.Status'), - dict(state='retry', matcher='path', expected='PENDING', argument='FirewallStatus.ConfigurationSyncStateSummary'), - dict(state='success', matcher='path', expected='IN_SYNC', argument='FirewallStatus.ConfigurationSyncStateSummary'), - ] + dict(state="failure", matcher="path", expected="DELETING", argument="FirewallStatus.Status"), + dict(state="retry", matcher="path", expected="PROVISIONING", argument="FirewallStatus.Status"), + dict( + state="retry", + matcher="path", + expected="PENDING", + argument="FirewallStatus.ConfigurationSyncStateSummary", + ), + dict( + state="success", + matcher="path", + expected="IN_SYNC", + argument="FirewallStatus.ConfigurationSyncStateSummary", + ), + ], ), firewall_deleted=dict( - operation='DescribeFirewall', - delay=5, maxAttempts=240, + operation="DescribeFirewall", + delay=5, + maxAttempts=240, acceptors=[ - dict(state='retry', matcher='path', expected='DELETING', argument='FirewallStatus.Status'), - dict(state='success', matcher='error', expected='ResourceNotFoundException'), - ] + dict(state="retry", matcher="path", expected="DELETING", argument="FirewallStatus.Status"), + dict(state="success", matcher="error", expected="ResourceNotFoundException"), + ], ), ) data.update(nw_data) @@ -150,65 +189,65 @@ class NFRuleGroupBoto3Mixin(NetworkFirewallBoto3Mixin): # retry - retries the full fetch, but better than simply giving up. 
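    # A minimal standalone sketch of the retry-wrapped pagination idiom used
    # below; the pre-built boto3 "network-firewall" client and the function
    # name are illustrative assumptions, not part of this patch:
    #
    #   from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
    #
    #   @AWSRetry.jittered_backoff()
    #   def list_rule_groups(client, **params):
    #       # build_full_result() walks every page; the decorator re-runs the
    #       # whole fetch with jittered exponential backoff on throttling errors.
    #       paginator = client.get_paginator("list_rule_groups")
    #       return paginator.paginate(**params).build_full_result()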
@AWSRetry.jittered_backoff() def _paginated_list_rule_groups(self, **params): - paginator = self.client.get_paginator('list_rule_groups') + paginator = self.client.get_paginator("list_rule_groups") result = paginator.paginate(**params).build_full_result() - return result.get('RuleGroups', None) + return result.get("RuleGroups", None) - @Boto3Mixin.aws_error_handler('list all rule groups') + @Boto3Mixin.aws_error_handler("list all rule groups") def _list_rule_groups(self, **params): return self._paginated_list_rule_groups(**params) - @Boto3Mixin.aws_error_handler('describe rule group') + @Boto3Mixin.aws_error_handler("describe rule group") def _describe_rule_group(self, **params): try: result = self.client.describe_rule_group(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - rule_group = result.get('RuleGroup', None) - metadata = result.get('RuleGroupResponse', None) + rule_group = result.get("RuleGroup", None) + metadata = result.get("RuleGroupResponse", None) return dict(RuleGroup=rule_group, RuleGroupMetadata=metadata) - @Boto3Mixin.aws_error_handler('create rule group') + @Boto3Mixin.aws_error_handler("create rule group") def _create_rule_group(self, **params): result = self.client.create_rule_group(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('RuleGroupResponse', None) + return result.get("RuleGroupResponse", None) - @Boto3Mixin.aws_error_handler('update rule group') + @Boto3Mixin.aws_error_handler("update rule group") def _update_rule_group(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.update_rule_group(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('RuleGroupResponse', None) + return result.get("RuleGroupResponse", None) - @Boto3Mixin.aws_error_handler('delete rule group') + @Boto3Mixin.aws_error_handler("delete rule group") def _delete_rule_group(self, **params): try: result = self.client.delete_rule_group(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - return result.get('RuleGroupResponse', None) + return result.get("RuleGroupResponse", None) - @Boto3Mixin.aws_error_handler('firewall rule to finish deleting') + @Boto3Mixin.aws_error_handler("firewall rule to finish deleting") def _wait_rule_group_deleted(self, **params): - waiter = self.nf_waiter_factory.get_waiter('rule_group_deleted') + waiter = self.nf_waiter_factory.get_waiter("rule_group_deleted") waiter.wait(**params) - @Boto3Mixin.aws_error_handler('firewall rule to become active') + @Boto3Mixin.aws_error_handler("firewall rule to become active") def _wait_rule_group_active(self, **params): - waiter = self.nf_waiter_factory.get_waiter('rule_group_active') + waiter = self.nf_waiter_factory.get_waiter("rule_group_active") waiter.wait(**params) @@ -217,65 +256,65 @@ class 
NFPolicyBoto3Mixin(NetworkFirewallBoto3Mixin): # retry - retries the full fetch, but better than simply giving up. @AWSRetry.jittered_backoff() def _paginated_list_policies(self, **params): - paginator = self.client.get_paginator('list_firewall_policies') + paginator = self.client.get_paginator("list_firewall_policies") result = paginator.paginate(**params).build_full_result() - return result.get('FirewallPolicies', None) + return result.get("FirewallPolicies", None) - @Boto3Mixin.aws_error_handler('list all firewall policies') + @Boto3Mixin.aws_error_handler("list all firewall policies") def _list_policies(self, **params): return self._paginated_list_policies(**params) - @Boto3Mixin.aws_error_handler('describe firewall policy') + @Boto3Mixin.aws_error_handler("describe firewall policy") def _describe_policy(self, **params): try: result = self.client.describe_firewall_policy(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - policy = result.get('FirewallPolicy', None) - metadata = result.get('FirewallPolicyResponse', None) + policy = result.get("FirewallPolicy", None) + metadata = result.get("FirewallPolicyResponse", None) return dict(FirewallPolicy=policy, FirewallPolicyMetadata=metadata) - @Boto3Mixin.aws_error_handler('create firewall policy') + @Boto3Mixin.aws_error_handler("create firewall policy") def _create_policy(self, **params): result = self.client.create_firewall_policy(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallPolicyResponse', None) + return result.get("FirewallPolicyResponse", None) - @Boto3Mixin.aws_error_handler('update firewall policy') + @Boto3Mixin.aws_error_handler("update firewall policy") def _update_policy(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.update_firewall_policy(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallPolicyResponse', None) + return result.get("FirewallPolicyResponse", None) - @Boto3Mixin.aws_error_handler('delete firewall policy') + @Boto3Mixin.aws_error_handler("delete firewall policy") def _delete_policy(self, **params): try: result = self.client.delete_firewall_policy(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - return result.get('FirewallPolicyResponse', None) + return result.get("FirewallPolicyResponse", None) - @Boto3Mixin.aws_error_handler('firewall policy to finish deleting') + @Boto3Mixin.aws_error_handler("firewall policy to finish deleting") def _wait_policy_deleted(self, **params): - waiter = self.nf_waiter_factory.get_waiter('policy_deleted') + waiter = self.nf_waiter_factory.get_waiter("policy_deleted") waiter.wait(**params) - @Boto3Mixin.aws_error_handler('firewall policy to become active') + @Boto3Mixin.aws_error_handler("firewall policy to become active") def 
_wait_policy_active(self, **params): - waiter = self.nf_waiter_factory.get_waiter('policy_active') + waiter = self.nf_waiter_factory.get_waiter("policy_active") waiter.wait(**params) @@ -284,136 +323,136 @@ class NFFirewallBoto3Mixin(NetworkFirewallBoto3Mixin): # retry - retries the full fetch, but better than simply giving up. @AWSRetry.jittered_backoff() def _paginated_list_firewalls(self, **params): - paginator = self.client.get_paginator('list_firewalls') + paginator = self.client.get_paginator("list_firewalls") result = paginator.paginate(**params).build_full_result() - return result.get('Firewalls', None) + return result.get("Firewalls", None) - @Boto3Mixin.aws_error_handler('list all firewalls') + @Boto3Mixin.aws_error_handler("list all firewalls") def _list_firewalls(self, **params): return self._paginated_list_firewalls(**params) - @Boto3Mixin.aws_error_handler('describe firewall') + @Boto3Mixin.aws_error_handler("describe firewall") def _describe_firewall(self, **params): try: result = self.client.describe_firewall(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - firewall = result.get('Firewall', None) - metadata = result.get('FirewallStatus', None) + firewall = result.get("Firewall", None) + metadata = result.get("FirewallStatus", None) return dict(Firewall=firewall, FirewallMetadata=metadata) - @Boto3Mixin.aws_error_handler('create firewall') + @Boto3Mixin.aws_error_handler("create firewall") def _create_firewall(self, **params): result = self.client.create_firewall(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallStatus', None) + return result.get("FirewallStatus", None) - @Boto3Mixin.aws_error_handler('update firewall description') + @Boto3Mixin.aws_error_handler("update firewall description") def _update_firewall_description(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.update_firewall_description(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallName', None) + return result.get("FirewallName", None) - @Boto3Mixin.aws_error_handler('update firewall subnet change protection') + @Boto3Mixin.aws_error_handler("update firewall subnet change protection") def _update_subnet_change_protection(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.update_subnet_change_protection(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallName', None) + return result.get("FirewallName", None) - @Boto3Mixin.aws_error_handler('update firewall policy change protection') + @Boto3Mixin.aws_error_handler("update 
@@ -284,136 +323,136 @@ class NFFirewallBoto3Mixin(NetworkFirewallBoto3Mixin):
    # retry - retries the full fetch, but better than simply giving up.
    @AWSRetry.jittered_backoff()
    def _paginated_list_firewalls(self, **params):
-        paginator = self.client.get_paginator('list_firewalls')
+        paginator = self.client.get_paginator("list_firewalls")
        result = paginator.paginate(**params).build_full_result()
-        return result.get('Firewalls', None)
+        return result.get("Firewalls", None)

-    @Boto3Mixin.aws_error_handler('list all firewalls')
+    @Boto3Mixin.aws_error_handler("list all firewalls")
    def _list_firewalls(self, **params):
        return self._paginated_list_firewalls(**params)

-    @Boto3Mixin.aws_error_handler('describe firewall')
+    @Boto3Mixin.aws_error_handler("describe firewall")
    def _describe_firewall(self, **params):
        try:
            result = self.client.describe_firewall(aws_retry=True, **params)
-        except is_boto3_error_code('ResourceNotFoundException'):
+        except is_boto3_error_code("ResourceNotFoundException"):
            return None
-        update_token = result.get('UpdateToken', None)
+        update_token = result.get("UpdateToken", None)
        if update_token:
            self._update_token = update_token
-        firewall = result.get('Firewall', None)
-        metadata = result.get('FirewallStatus', None)
+        firewall = result.get("Firewall", None)
+        metadata = result.get("FirewallStatus", None)
        return dict(Firewall=firewall, FirewallMetadata=metadata)

-    @Boto3Mixin.aws_error_handler('create firewall')
+    @Boto3Mixin.aws_error_handler("create firewall")
    def _create_firewall(self, **params):
        result = self.client.create_firewall(aws_retry=True, **params)
-        update_token = result.get('UpdateToken', None)
+        update_token = result.get("UpdateToken", None)
        if update_token:
            self._update_token = update_token
-        return result.get('FirewallStatus', None)
+        return result.get("FirewallStatus", None)

-    @Boto3Mixin.aws_error_handler('update firewall description')
+    @Boto3Mixin.aws_error_handler("update firewall description")
    def _update_firewall_description(self, **params):
-        if self._update_token and 'UpdateToken' not in params:
-            params['UpdateToken'] = self._update_token
+        if self._update_token and "UpdateToken" not in params:
+            params["UpdateToken"] = self._update_token
        result = self.client.update_firewall_description(aws_retry=True, **params)
-        update_token = result.get('UpdateToken', None)
+        update_token = result.get("UpdateToken", None)
        if update_token:
            self._update_token = update_token
-        return result.get('FirewallName', None)
+        return result.get("FirewallName", None)

-    @Boto3Mixin.aws_error_handler('update firewall subnet change protection')
+    @Boto3Mixin.aws_error_handler("update firewall subnet change protection")
    def _update_subnet_change_protection(self, **params):
-        if self._update_token and 'UpdateToken' not in params:
-            params['UpdateToken'] = self._update_token
+        if self._update_token and "UpdateToken" not in params:
+            params["UpdateToken"] = self._update_token
        result = self.client.update_subnet_change_protection(aws_retry=True, **params)
-        update_token = result.get('UpdateToken', None)
+        update_token = result.get("UpdateToken", None)
        if update_token:
            self._update_token = update_token
-        return result.get('FirewallName', None)
+        return result.get("FirewallName", None)

-    @Boto3Mixin.aws_error_handler('update firewall policy change protection')
+    @Boto3Mixin.aws_error_handler("update firewall policy change protection")
    def _update_firewall_policy_change_protection(self, **params):
-        if self._update_token and 'UpdateToken' not in params:
-            params['UpdateToken'] = self._update_token
+        if self._update_token and "UpdateToken" not in params:
+            params["UpdateToken"] = self._update_token
        result = self.client.update_firewall_policy_change_protection(aws_retry=True, **params)
-        update_token = result.get('UpdateToken', None)
+        update_token = result.get("UpdateToken", None)
        if update_token:
            self._update_token = update_token
-        return result.get('FirewallName', None)
+        return result.get("FirewallName", None)

-    @Boto3Mixin.aws_error_handler('update firewall deletion protection')
+    @Boto3Mixin.aws_error_handler("update firewall deletion protection")
    def _update_firewall_delete_protection(self, **params):
-        if self._update_token and 'UpdateToken' not in params:
-            params['UpdateToken'] = self._update_token
+        if self._update_token and "UpdateToken" not in params:
+            params["UpdateToken"] = self._update_token
        result = self.client.update_firewall_delete_protection(aws_retry=True, **params)
-        update_token = result.get('UpdateToken', None)
+        update_token = result.get("UpdateToken", None)
        if update_token:
            self._update_token = update_token
-        return result.get('FirewallName', None)
+        return result.get("FirewallName", None)

-    @Boto3Mixin.aws_error_handler('associate policy with firewall')
+    @Boto3Mixin.aws_error_handler("associate policy with firewall")
    def _associate_firewall_policy(self, **params):
-        if self._update_token and 'UpdateToken' not in params:
-            params['UpdateToken'] = self._update_token
+        if self._update_token and "UpdateToken" not in params:
+            params["UpdateToken"] = self._update_token
        result = self.client.associate_firewall_policy(aws_retry=True, **params)
-        update_token = result.get('UpdateToken', None)
+        update_token = result.get("UpdateToken", None)
        if update_token:
            self._update_token = update_token
-        return result.get('FirewallName', None)
+        return result.get("FirewallName", None)

-    @Boto3Mixin.aws_error_handler('associate subnets with firewall')
+    @Boto3Mixin.aws_error_handler("associate subnets with firewall")
    def _associate_subnets(self, **params):
-        if self._update_token and 'UpdateToken' not in params:
-            params['UpdateToken'] = self._update_token
+        if self._update_token and "UpdateToken" not in params:
+            params["UpdateToken"] = self._update_token
        result = self.client.associate_subnets(aws_retry=True, **params)
-        update_token = result.get('UpdateToken', None)
+        update_token = result.get("UpdateToken", None)
        if update_token:
            self._update_token = update_token
-        return result.get('FirewallName', None)
+        return result.get("FirewallName", None)

-    @Boto3Mixin.aws_error_handler('disassociate subnets from firewall')
+    @Boto3Mixin.aws_error_handler("disassociate subnets from firewall")
    def _disassociate_subnets(self, **params):
-        if self._update_token and 'UpdateToken' not in params:
-            params['UpdateToken'] = self._update_token
+        if self._update_token and "UpdateToken" not in params:
+            params["UpdateToken"] = self._update_token
        result = self.client.disassociate_subnets(aws_retry=True, **params)
-        update_token = result.get('UpdateToken', None)
+        update_token = result.get("UpdateToken", None)
        if update_token:
            self._update_token = update_token
-        return result.get('FirewallName', None)
+        return result.get("FirewallName", None)

-    @Boto3Mixin.aws_error_handler('delete firewall')
+    @Boto3Mixin.aws_error_handler("delete firewall")
    def _delete_firewall(self, **params):
        try:
            result = self.client.delete_firewall(aws_retry=True, **params)
-        except is_boto3_error_code('ResourceNotFoundException'):
+        except is_boto3_error_code("ResourceNotFoundException"):
            return None
-        return result.get('FirewallStatus', None)
+        return result.get("FirewallStatus", None)

-    @Boto3Mixin.aws_error_handler('firewall to finish deleting')
+    @Boto3Mixin.aws_error_handler("firewall to finish deleting")
    def _wait_firewall_deleted(self, **params):
-        waiter = self.nf_waiter_factory.get_waiter('firewall_deleted')
+        waiter = self.nf_waiter_factory.get_waiter("firewall_deleted")
        waiter.wait(**params)

-    @Boto3Mixin.aws_error_handler('firewall to finish updating')
+    @Boto3Mixin.aws_error_handler("firewall to finish updating")
    def _wait_firewall_updated(self, **params):
-        waiter = self.nf_waiter_factory.get_waiter('firewall_updated')
+        waiter = self.nf_waiter_factory.get_waiter("firewall_updated")
        waiter.wait(**params)

-    @Boto3Mixin.aws_error_handler('firewall to become active')
+    @Boto3Mixin.aws_error_handler("firewall to become active")
    def _wait_firewall_active(self, **params):
-        waiter = self.nf_waiter_factory.get_waiter('firewall_active')
+        waiter = self.nf_waiter_factory.get_waiter("firewall_active")
        waiter.wait(**params)
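The nf_waiter_factory used by the _wait_* helpers builds custom waiters, since botocore ships no built-in waiters for the network-firewall service. A sketch of how such a waiter can be assembled with botocore's public waiter machinery; the acceptor path follows DescribeFirewallPolicy's response shape, while the delay, attempt count, and policy name are illustrative assumptions rather than the collection's actual configuration:

import boto3
from botocore.waiter import WaiterModel, create_waiter_with_client

client = boto3.client("network-firewall")

waiter_model = WaiterModel(
    {
        "version": 2,
        "waiters": {
            "policy_active": {
                "operation": "DescribeFirewallPolicy",
                "delay": 5,          # seconds between polls (illustrative)
                "maxAttempts": 120,  # illustrative
                "acceptors": [
                    {
                        "matcher": "path",
                        "argument": "FirewallPolicyResponse.FirewallPolicyStatus",
                        "expected": "ACTIVE",
                        "state": "success",
                    },
                ],
            },
        },
    }
)

waiter = create_waiter_with_client("policy_active", waiter_model, client)
waiter.wait(FirewallPolicyName="example-policy")  # hypothetical name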
""" - return deepcopy(metadata) + meta = deepcopy(metadata) + meta.pop("LastModifiedTime", None) + return meta def _flush_create(self): changed = super(BaseNetworkFirewallManager, self)._flush_create() @@ -485,18 +526,18 @@ class BaseNetworkFirewallManager(BaseResourceManager): self._metadata_updates = dict() return changed - @BaseResourceManager.aws_error_handler('set tags on resource') + @BaseResourceManager.aws_error_handler("set tags on resource") def _add_tags(self, **params): self.client.tag_resource(aws_retry=True, **params) return True - @BaseResourceManager.aws_error_handler('unset tags on resource') + @BaseResourceManager.aws_error_handler("unset tags on resource") def _remove_tags(self, **params): self.client.untag_resource(aws_retry=True, **params) return True def _get_preupdate_arn(self): - return self._preupdate_metadata.get('Arn') + return self._preupdate_metadata.get("Arn") def _set_metadata_value(self, key, value, description=None, immutable=False): if value is None: @@ -506,8 +547,7 @@ class BaseNetworkFirewallManager(BaseResourceManager): if immutable and self.original_resource: if description is None: description = key - self.module.fail_json(msg='{0} can not be updated after creation' - .format(description)) + self.module.fail_json(msg=f"{description} can not be updated after creation") self._metadata_updates[key] = value self.changed = True return True @@ -516,15 +556,15 @@ class BaseNetworkFirewallManager(BaseResourceManager): return self._metadata_updates.get(key, self._preupdate_metadata.get(key, default)) def _set_tag_values(self, desired_tags): - return self._set_metadata_value('Tags', ansible_dict_to_boto3_tag_list(desired_tags)) + return self._set_metadata_value("Tags", ansible_dict_to_boto3_tag_list(desired_tags)) def _get_tag_values(self): - return self._get_metadata_value('Tags', []) + return self._get_metadata_value("Tags", []) def _flush_tagging(self): changed = False - tags_to_add = self._tagging_updates.get('add') - tags_to_remove = self._tagging_updates.get('remove') + tags_to_add = self._tagging_updates.get("add") + tags_to_remove = self._tagging_updates.get("remove") resource_arn = self._get_preupdate_arn() if not resource_arn: @@ -543,7 +583,6 @@ class BaseNetworkFirewallManager(BaseResourceManager): return changed def set_tags(self, tags, purge_tags): - if tags is None: return False changed = False @@ -562,10 +601,10 @@ class BaseNetworkFirewallManager(BaseResourceManager): tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags) if tags_to_add: - self._tagging_updates['add'] = tags_to_add + self._tagging_updates["add"] = tags_to_add changed = True if tags_to_remove: - self._tagging_updates['remove'] = tags_to_remove + self._tagging_updates["remove"] = tags_to_remove changed = True if changed: @@ -578,9 +617,7 @@ class BaseNetworkFirewallManager(BaseResourceManager): class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManager): - - RULE_TYPES = frozenset(['StatelessRulesAndCustomActions', 'StatefulRules', - 'RulesSourceList', 'RulesString']) + RULE_TYPES = frozenset(["StatelessRulesAndCustomActions", "StatefulRules", "RulesSourceList", "RulesString"]) name = None rule_type = None @@ -599,28 +636,28 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag def _extra_error_output(self): output = super(NetworkFirewallRuleManager, self)._extra_error_output() if self.name: - output['RuleGroupName'] = self.name + output["RuleGroupName"] = self.name if self.rule_type: - output['Type'] 
@@ -578,9 +617,7 @@

class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManager):
-
-    RULE_TYPES = frozenset(['StatelessRulesAndCustomActions', 'StatefulRules',
-                            'RulesSourceList', 'RulesString'])
+    RULE_TYPES = frozenset(["StatelessRulesAndCustomActions", "StatefulRules", "RulesSourceList", "RulesString"])

    name = None
    rule_type = None

@@ -599,28 +636,28 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
    def _extra_error_output(self):
        output = super(NetworkFirewallRuleManager, self)._extra_error_output()
        if self.name:
-            output['RuleGroupName'] = self.name
+            output["RuleGroupName"] = self.name
        if self.rule_type:
-            output['Type'] = self.rule_type
+            output["Type"] = self.rule_type
        if self.arn:
-            output['RuleGroupArn'] = self.arn
+            output["RuleGroupArn"] = self.arn
        return output

    def _filter_immutable_metadata_attributes(self, metadata):
        metadata = super(NetworkFirewallRuleManager, self)._filter_immutable_metadata_attributes(metadata)
-        metadata.pop('RuleGroupArn', None)
-        metadata.pop('RuleGroupName', None)
-        metadata.pop('RuleGroupId', None)
-        metadata.pop('Type', None)
-        metadata.pop('Capacity', None)
-        metadata.pop('RuleGroupStatus', None)
-        metadata.pop('Tags', None)
-        metadata.pop('ConsumedCapacity', None)
-        metadata.pop('NumberOfAssociations', None)
+        metadata.pop("RuleGroupArn", None)
+        metadata.pop("RuleGroupName", None)
+        metadata.pop("RuleGroupId", None)
+        metadata.pop("Type", None)
+        metadata.pop("Capacity", None)
+        metadata.pop("RuleGroupStatus", None)
+        metadata.pop("Tags", None)
+        metadata.pop("ConsumedCapacity", None)
+        metadata.pop("NumberOfAssociations", None)
        return metadata

    def _get_preupdate_arn(self):
-        return self._get_metadata_value('RuleGroupArn')
+        return self._get_metadata_value("RuleGroupArn")

    def _get_id_params(self, name=None, rule_type=None, arn=None):
        if arn:

@@ -635,7 +672,7 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
            rule_type = rule_type.upper()
        if not rule_type or not name:
            # Users should never see this, but let's cover ourself
-            self.module.fail_json(msg='Rule identifier parameters missing')
+            self.module.fail_json(msg="Rule identifier parameters missing")
        return dict(RuleGroupName=name, Type=rule_type)

    @staticmethod

@@ -647,7 +684,6 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
        return {k: dict(Definition=_string_list(v)) for (k, v) in variables.items()}

    def delete(self, name=None, rule_type=None, arn=None):
-
        id_params = self._get_id_params(name=name, rule_type=rule_type, arn=arn)
        result = self._get_rule_group(**id_params)

@@ -657,8 +693,8 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
        self.updated_resource = dict()

        # Rule Group is already in the process of being deleted (takes time)
-        rule_status = self._get_metadata_value('RuleGroupStatus', '').upper()
-        if rule_status == 'DELETING':
+        rule_status = self._get_metadata_value("RuleGroupStatus", "").upper()
+        if rule_status == "DELETING":
            self._wait_for_deletion()
            return False

@@ -675,37 +711,37 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
        params = dict()
        if scope:
            scope = scope.upper()
-            params['Scope'] = scope
+            params["Scope"] = scope

        rule_groups = self._list_rule_groups(**params)
        if not rule_groups:
            return list()

-        return [r.get('Arn', None) for r in rule_groups]
+        return [r.get("Arn", None) for r in rule_groups]

    def _normalize_rule_variable(self, variable):
        if variable is None:
            return None
-        return {k: variable.get(k, dict()).get('Definition', []) for k in variable.keys()}
+        return {k: variable.get(k, dict()).get("Definition", []) for k in variable.keys()}

    def _normalize_rule_variables(self, variables):
        if variables is None:
            return None
        result = dict()
-        ip_sets = self._normalize_rule_variable(variables.get('IPSets', None))
+        ip_sets = self._normalize_rule_variable(variables.get("IPSets", None))
        if ip_sets:
-            result['ip_sets'] = ip_sets
-        port_sets = self._normalize_rule_variable(variables.get('PortSets', None))
+            result["ip_sets"] = ip_sets
+        port_sets = self._normalize_rule_variable(variables.get("PortSets", None))
        if port_sets:
-            result['port_sets'] = port_sets
+            result["port_sets"] = port_sets
        return result

    def _normalize_rule_group(self, rule_group):
        if rule_group is None:
            return None
-        rule_variables = self._normalize_rule_variables(rule_group.get('RuleVariables', None))
+        rule_variables = self._normalize_rule_variables(rule_group.get("RuleVariables", None))
        rule_group = self._normalize_boto3_resource(rule_group)
        if rule_variables is not None:
-            rule_group['rule_variables'] = rule_variables
+            rule_group["rule_variables"] = rule_variables
        return rule_group

    def _normalize_rule_group_metadata(self, rule_group_metadata):

@@ -714,20 +750,19 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
    def _normalize_rule_group_result(self, result):
        if result is None:
            return None
-        rule_group = self._normalize_rule_group(result.get('RuleGroup', None))
-        rule_group_metadata = self._normalize_rule_group_metadata(result.get('RuleGroupMetadata', None))
+        rule_group = self._normalize_rule_group(result.get("RuleGroup", None))
+        rule_group_metadata = self._normalize_rule_group_metadata(result.get("RuleGroupMetadata", None))
        result = camel_dict_to_snake_dict(result)
        if rule_group:
-            result['rule_group'] = rule_group
+            result["rule_group"] = rule_group
        if rule_group_metadata:
-            result['rule_group_metadata'] = rule_group_metadata
+            result["rule_group_metadata"] = rule_group_metadata
        return result

    def _normalize_resource(self, resource):
        return self._normalize_rule_group_result(resource)

    def get_rule_group(self, name=None, rule_type=None, arn=None):
-
        id_params = self._get_id_params(name=name, rule_type=rule_type, arn=arn)
        result = self._get_rule_group(**id_params)

@@ -738,35 +773,32 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
        return rule_group

    def set_description(self, description):
-        return self._set_metadata_value('Description', description)
+        return self._set_metadata_value("Description", description)

    def set_capacity(self, capacity):
-        return self._set_metadata_value(
-            'Capacity', capacity,
-            description="Reserved Capacity", immutable=True)
+        return self._set_metadata_value("Capacity", capacity, description="Reserved Capacity", immutable=True)

    def _set_rule_option(self, option_name, description, value, immutable=False, default_value=None):
        if value is None:
            return False
-        rule_options = deepcopy(self._get_resource_value('StatefulRuleOptions', dict()))
+        rule_options = deepcopy(self._get_resource_value("StatefulRuleOptions", dict()))
        if value == rule_options.get(option_name, default_value):
            return False
        if immutable and self.original_resource:
-            self.module.fail_json(msg='{0} can not be updated after creation'
-                                  .format(description))
+            self.module.fail_json(msg=f"{description} can not be updated after creation")
        rule_options[option_name] = value
-        return self._set_resource_value('StatefulRuleOptions', rule_options)
+        return self._set_resource_value("StatefulRuleOptions", rule_options)

    def set_rule_order(self, order):
        RULE_ORDER_MAP = {
-            'default': 'DEFAULT_ACTION_ORDER',
-            'strict': 'STRICT_ORDER',
+            "default": "DEFAULT_ACTION_ORDER",
+            "strict": "STRICT_ORDER",
        }
        value = RULE_ORDER_MAP.get(order)
-        changed = self._set_rule_option('RuleOrder', 'Rule order', value, True, 'DEFAULT_ACTION_ORDER')
+        changed = self._set_rule_option("RuleOrder", "Rule order", value, True, "DEFAULT_ACTION_ORDER")
        self.changed |= changed
        return changed

@@ -776,7 +808,7 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
        variables = self._transform_rule_variables(variables)

-        all_variables = deepcopy(self._get_resource_value('RuleVariables', self._empty_rule_variables()))
+        all_variables = deepcopy(self._get_resource_value("RuleVariables", self._empty_rule_variables()))

        current_variables = all_variables.get(set_name, dict())
        updated_variables = _merge_dict(current_variables, variables, purge)

@@ -786,49 +818,50 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
        all_variables[set_name] = updated_variables

-        return self._set_resource_value('RuleVariables', all_variables)
+        return self._set_resource_value("RuleVariables", all_variables)

    def set_ip_variables(self, variables, purge):
-        return self._set_rule_variables('IPSets', variables, purge)
+        return self._set_rule_variables("IPSets", variables, purge)

    def set_port_variables(self, variables, purge):
-        return self._set_rule_variables('PortSets', variables, purge)
+        return self._set_rule_variables("PortSets", variables, purge)

    def _set_rule_source(self, rule_type, rules):
        if not rules:
            return False
        conflicting_types = self.RULE_TYPES.difference({rule_type})
-        rules_source = deepcopy(self._get_resource_value('RulesSource', dict()))
+        rules_source = deepcopy(self._get_resource_value("RulesSource", dict()))
        current_keys = set(rules_source.keys())
        conflicting_rule_type = conflicting_types.intersection(current_keys)
        if conflicting_rule_type:
-            self.module.fail_json('Unable to add {0} rules, {1} rules already set'
-                                  .format(rule_type, " and ".join(conflicting_rule_type)))
+            self.module.fail_json(
+                f"Unable to add {rule_type} rules, {' and '.join(conflicting_rule_type)} rules already set"
+            )

        original_rules = rules_source.get(rule_type, None)
        if rules == original_rules:
            return False
        rules_source[rule_type] = rules
-        return self._set_resource_value('RulesSource', rules_source)
+        return self._set_resource_value("RulesSource", rules_source)

    def set_rule_string(self, rule):
        if rule is None:
            return False
        if not rule:
-            self.module.fail_json('Rule string must include at least one rule')
+            self.module.fail_json("Rule string must include at least one rule")

        rule = "\n".join(_string_list(rule))
-        return self._set_rule_source('RulesString', rule)
+        return self._set_rule_source("RulesString", rule)

    def set_domain_list(self, options):
        if not options:
            return False
        changed = False
-        domain_names = options.get('domain_names')
-        home_net = options.get('source_ips', None)
-        action = options.get('action')
-        filter_http = options.get('filter_http', False)
-        filter_https = options.get('filter_https', False)
+        domain_names = options.get("domain_names")
+        home_net = options.get("source_ips", None)
+        action = options.get("action")
+        filter_http = options.get("filter_http", False)
+        filter_https = options.get("filter_https", False)

        if home_net:
            # Seems a little kludgy but the HOME_NET ip variable is how you

@@ -840,14 +873,14 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
        # Perform some transformations
        target_types = []
        if filter_http:
-            target_types.append('HTTP_HOST')
+            target_types.append("HTTP_HOST")
        if filter_https:
-            target_types.append('TLS_SNI')
+            target_types.append("TLS_SNI")

-        if action == 'allow':
-            action = 'ALLOWLIST'
+        if action == "allow":
+            action = "ALLOWLIST"
        else:
-            action = 'DENYLIST'
+            action = "DENYLIST"

        # Finally build the 'rule'
        rule = dict(

@@ -855,37 +888,37 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
            TargetTypes=target_types,
            GeneratedRulesType=action,
        )
-        changed |= self._set_rule_source('RulesSourceList', rule)
+        changed |= self._set_rule_source("RulesSourceList", rule)
        return changed

    def _format_rule_options(self, options, sid):
        formatted_options = []
-        opt = dict(Keyword='sid:{0}'.format(sid))
+        opt = dict(Keyword=f"sid:{sid}")
        formatted_options.append(opt)
        if options:
            for option in sorted(options.keys()):
                opt = dict(Keyword=option)
                settings = options.get(option)
                if settings:
-                    opt['Settings'] = _string_list(settings)
+                    opt["Settings"] = _string_list(settings)
                formatted_options.append(opt)
        return formatted_options

    def _format_stateful_rule(self, rule):
        options = self._format_rule_options(
-            rule.get('rule_options', dict()),
-            rule.get('sid'),
+            rule.get("rule_options", dict()),
+            rule.get("sid"),
        )
        formatted_rule = dict(
-            Action=rule.get('action').upper(),
+            Action=rule.get("action").upper(),
            RuleOptions=options,
            Header=dict(
-                Protocol=rule.get('protocol').upper(),
-                Source=rule.get('source'),
-                SourcePort=rule.get('source_port'),
-                Direction=rule.get('direction').upper(),
-                Destination=rule.get('destination'),
-                DestinationPort=rule.get('destination_port'),
+                Protocol=rule.get("protocol").upper(),
+                Source=rule.get("source"),
+                SourcePort=rule.get("source_port"),
+                Direction=rule.get("direction").upper(),
+                Destination=rule.get("destination"),
+                DestinationPort=rule.get("destination_port"),
            ),
        )
        return formatted_rule

@@ -894,40 +927,39 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
        if rules is None:
            return False
        if not rules:
-            self.module.fail_json(msg='Rule list must include at least one rule')
+            self.module.fail_json(msg="Rule list must include at least one rule")

        formatted_rules = [self._format_stateful_rule(r) for r in rules]
-        return self._set_rule_source('StatefulRules', formatted_rules)
+        return self._set_rule_source("StatefulRules", formatted_rules)

    def _do_create_resource(self):
        metadata, resource = self._merge_changes(filter_metadata=False)
        params = metadata
        params.update(self._get_id_params())
-        params['RuleGroup'] = resource
+        params["RuleGroup"] = resource
        response = self._create_rule_group(**params)
        return bool(response)

    def _generate_updated_resource(self):
        metadata, resource = self._merge_changes(filter_metadata=False)
        metadata.update(self._get_id_params())
-        updated_resource = dict(
-            RuleGroup=resource,
-            RuleGroupMetadata=metadata
-        )
+        updated_resource = dict(RuleGroup=resource, RuleGroupMetadata=metadata)
        return updated_resource

    def _flush_create(self):
        # Apply some pre-flight tests before trying to run the creation.
-        if 'Capacity' not in self._metadata_updates:
-            self.module.fail_json('Capacity must be provided when creating a new Rule Group')
+        if "Capacity" not in self._metadata_updates:
+            self.module.fail_json("Capacity must be provided when creating a new Rule Group")

-        rules_source = self._get_resource_value('RulesSource', dict())
+        rules_source = self._get_resource_value("RulesSource", dict())
        rule_type = self.RULE_TYPES.intersection(set(rules_source.keys()))
        if len(rule_type) != 1:
-            self.module.fail_json('Exactly one of rule strings, domain list or rule list'
-                                  ' must be provided when creating a new rule group',
-                                  rule_type=rule_type, keys=self._resource_updates.keys(),
-                                  types=self.RULE_TYPES)
+            self.module.fail_json(
+                "Exactly one of rule strings, domain list or rule list must be provided when creating a new rule group",
+                rule_type=rule_type,
+                keys=self._resource_updates.keys(),
+                types=self.RULE_TYPES,
+            )

        return super(NetworkFirewallRuleManager, self)._flush_create()

@@ -942,7 +974,7 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
        params = metadata
        params.update(self._get_id_params())
-        params['RuleGroup'] = resource
+        params["RuleGroup"] = resource

        if not self.module.check_mode:
            response = self._update_rule_group(**params)

@@ -960,8 +992,8 @@ class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManag
        if not result:
            return None

-        rule_group = result.get('RuleGroup', None)
-        metadata = result.get('RuleGroupMetadata', None)
+        rule_group = result.get("RuleGroup", None)
+        metadata = result.get("RuleGroupMetadata", None)
        self._preupdate_resource = deepcopy(rule_group)
        self._preupdate_metadata = deepcopy(metadata)
        return dict(RuleGroup=rule_group, RuleGroupMetadata=metadata)
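For reference, _format_stateful_rule above turns the module's snake_case rule description into the CamelCase StatefulRule structure the API expects. A worked example with hypothetical values:

# Hypothetical input, as accepted by the rule-group module:
rule = {
    "sid": 1,
    "action": "pass",
    "protocol": "tcp",
    "source": "any",
    "source_port": "any",
    "direction": "forward",
    "destination": "192.0.2.0/24",
    "destination_port": "443",
}

# _format_stateful_rule(rule) yields the shape AWS expects:
formatted = {
    "Action": "PASS",
    "RuleOptions": [{"Keyword": "sid:1"}],
    "Header": {
        "Protocol": "TCP",
        "Source": "any",
        "SourcePort": "any",
        "Direction": "FORWARD",
        "Destination": "192.0.2.0/24",
        "DestinationPort": "443",
    },
}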
@@ -981,7 +1013,6 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
-
    name = None
    arn = None
    _group_name_cache = None

@@ -998,25 +1029,25 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
    def _extra_error_output(self):
        output = super(NetworkFirewallPolicyManager, self)._extra_error_output()
        if self.name:
-            output['FirewallPolicyName'] = self.name
+            output["FirewallPolicyName"] = self.name
        if self.arn:
-            output['FirewallPolicyArn'] = self.arn
+            output["FirewallPolicyArn"] = self.arn
        return output

    def _filter_immutable_metadata_attributes(self, metadata):
        metadata = super(NetworkFirewallPolicyManager, self)._filter_immutable_metadata_attributes(metadata)
-        metadata.pop('FirewallPolicyArn', None)
-        metadata.pop('FirewallPolicyName', None)
-        metadata.pop('FirewallPolicyId', None)
-        metadata.pop('FirewallPolicyStatus', None)
-        metadata.pop('ConsumedStatelessRuleCapacity', None)
-        metadata.pop('ConsumedStatefulRuleCapacity', None)
-        metadata.pop('Tags', None)
-        metadata.pop('NumberOfAssociations', None)
+        metadata.pop("FirewallPolicyArn", None)
+        metadata.pop("FirewallPolicyName", None)
+        metadata.pop("FirewallPolicyId", None)
+        metadata.pop("FirewallPolicyStatus", None)
+        metadata.pop("ConsumedStatelessRuleCapacity", None)
+        metadata.pop("ConsumedStatefulRuleCapacity", None)
+        metadata.pop("Tags", None)
+        metadata.pop("NumberOfAssociations", None)
        return metadata

    def _get_preupdate_arn(self):
-        return self._get_metadata_value('FirewallPolicyArn')
+        return self._get_metadata_value("FirewallPolicyArn")

    def _get_id_params(self, name=None, arn=None):
        if arn:

@@ -1028,7 +1059,6 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        return dict(FirewallPolicyName=name)

    def delete(self, name=None, arn=None):
-
        id_params = self._get_id_params(name=name, arn=arn)
        result = self._get_policy(**id_params)

@@ -1038,8 +1068,8 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        self.updated_resource = dict()

        # Policy is already in the process of being deleted (takes time)
-        rule_status = self._get_metadata_value('FirewallPolicyStatus', '').upper()
-        if rule_status == 'DELETING':
+        rule_status = self._get_metadata_value("FirewallPolicyStatus", "").upper()
+        if rule_status == "DELETING":
            self._wait_for_deletion()
            return False

@@ -1058,7 +1088,7 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        if not policies:
            return list()

-        return [p.get('Arn', None) for p in policies]
+        return [p.get("Arn", None) for p in policies]

    @property
    def _rule_group_name_cache(self):

@@ -1068,16 +1098,16 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        if not results:
            return dict()

-        group_cache = {r.get('Name', None): r.get('Arn', None) for r in results}
+        group_cache = {r.get("Name", None): r.get("Arn", None) for r in results}
        self._group_name_cache = group_cache
        return group_cache

    @property
    def _stateful_rule_order(self):
-        engine_options = self._get_resource_value('StatefulEngineOptions', None)
+        engine_options = self._get_resource_value("StatefulEngineOptions", None)
        if not engine_options:
-            return 'DEFAULT_ACTION_ORDER'
-        return engine_options.get('RuleOrder', 'DEFAULT_ACTION_ORDER')
+            return "DEFAULT_ACTION_ORDER"
+        return engine_options.get("RuleOrder", "DEFAULT_ACTION_ORDER")

    def _canonicalize_rule_group(self, name, group_type):
        """Iterates through a mixed list of ARNs and Names converting them to

@@ -1085,20 +1115,22 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        """
        arn = None
        # : is only valid in ARNs
-        if ':' in name:
+        if ":" in name:
            arn = name
        else:
            arn = self._rule_group_name_cache.get(name, None)
        if not arn:
-            self.module.fail_json('Unable to fetch ARN for rule group', name=name,
-                                  group_name_cache=self._rule_group_name_cache)
+            self.module.fail_json(
+                "Unable to fetch ARN for rule group", name=name, group_name_cache=self._rule_group_name_cache
+            )
        arn_info = parse_aws_arn(arn)
        if not arn_info:
-            self.module.fail_json('Unable to parse ARN for rule group', arn=arn, arn_info=arn_info)
-        arn_type = arn_info['resource'].split('/')[0]
+            self.module.fail_json("Unable to parse ARN for rule group", arn=arn, arn_info=arn_info)
+        arn_type = arn_info["resource"].split("/")[0]
        if arn_type != group_type:
-            self.module.fail_json('Rule group not of expected type', name=name,
-                                  arn=arn, expected_type=group_type, found_type=arn_type)
+            self.module.fail_json(
+                "Rule group not of expected type", name=name, arn=arn, expected_type=group_type, found_type=arn_type
+            )

        return arn

@@ -1107,15 +1139,15 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        for idx, arn in enumerate(groups):
            entry = dict(ResourceArn=arn)
            if strict_order:
-                entry['Priority'] = idx + 1
+                entry["Priority"] = idx + 1
            formated_groups.append(entry)
        return formated_groups

    def _rulegroup_references_list(self, groups):
-        return [g.get('ResourceArn') for g in groups]
+        return [g.get("ResourceArn") for g in groups]

    def _sorted_rulegroup_references_list(self, groups):
-        sorted_list = sorted(groups, key=lambda g: g.get('Priority', None))
+        sorted_list = sorted(groups, key=lambda g: g.get("Priority", None))
        return self._rulegroup_references_list(sorted_list)

    def _compare_rulegroup_references(self, current_groups, desired_groups, strict_order):

@@ -1132,23 +1164,22 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        if value is None:
            return False

-        engine_options = deepcopy(self._get_resource_value('StatefulEngineOptions', dict()))
+        engine_options = deepcopy(self._get_resource_value("StatefulEngineOptions", dict()))
        if value == engine_options.get(option_name, default_value):
            return False
        if immutable and self.original_resource:
-            self.module.fail_json(msg='{0} can not be updated after creation'
-                                  .format(description))
+            self.module.fail_json(msg=f"{description} can not be updated after creation")

        engine_options[option_name] = value
-        return self._set_resource_value('StatefulEngineOptions', engine_options)
+        return self._set_resource_value("StatefulEngineOptions", engine_options)

    def set_stateful_rule_order(self, order):
        RULE_ORDER_MAP = {
-            'default': 'DEFAULT_ACTION_ORDER',
-            'strict': 'STRICT_ORDER',
+            "default": "DEFAULT_ACTION_ORDER",
+            "strict": "STRICT_ORDER",
        }
        value = RULE_ORDER_MAP.get(order)
-        changed = self._set_engine_option('RuleOrder', 'Rule order', value, True, 'DEFAULT_ACTION_ORDER')
+        changed = self._set_engine_option("RuleOrder", "Rule order", value, True, "DEFAULT_ACTION_ORDER")
        self.changed |= changed
        return changed

@@ -1163,14 +1194,11 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        return self._set_resource_value(parameter_name, formated_groups)

    def set_stateful_rule_groups(self, groups):
-        strict_order = self._stateful_rule_order == 'STRICT_ORDER'
-        return self._set_rule_groups(groups, 'stateful-rulegroup',
-                                     'StatefulRuleGroupReferences',
-                                     strict_order)
+        strict_order = self._stateful_rule_order == "STRICT_ORDER"
+        return self._set_rule_groups(groups, "stateful-rulegroup", "StatefulRuleGroupReferences", strict_order)

    def set_stateless_rule_groups(self, groups):
-        return self._set_rule_groups(groups, 'stateless-rulegroup',
-                                     'StatelessRuleGroupReferences', True)
+        return self._set_rule_groups(groups, "stateless-rulegroup", "StatelessRuleGroupReferences", True)

    def set_default_actions(self, key, actions, valid_actions=None):
        if actions is None:

@@ -1179,38 +1207,35 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        invalid_actions = list(set(actions) - set(valid_actions or []))
        if valid_actions and invalid_actions:
            self.module.fail_json(
-                msg='{0} contains invalid actions'.format(key),
-                valid_actions=valid_actions, invalid_actions=invalid_actions,
-                actions=actions)
+                msg=f"{key} contains invalid actions",
+                valid_actions=valid_actions,
+                invalid_actions=invalid_actions,
+                actions=actions,
+            )
        return self._set_resource_value(key, actions)

    def set_stateful_default_actions(self, actions):
        if actions is None:
            return False
-        if self._stateful_rule_order != 'STRICT_ORDER':
-            self.module.fail_json(msg='Stateful default actions can only be set when using strict rule order')
+        if self._stateful_rule_order != "STRICT_ORDER":
+            self.module.fail_json(msg="Stateful default actions can only be set when using strict rule order")

-        valid_actions = [
-            'aws:drop_strict', 'aws:drop_established',
-            'aws:alert_strict', 'aws:alert_established'
-        ]
-        return self.set_default_actions('StatefulDefaultActions', actions, valid_actions)
+        valid_actions = ["aws:drop_strict", "aws:drop_established", "aws:alert_strict", "aws:alert_established"]
+        return self.set_default_actions("StatefulDefaultActions", actions, valid_actions)

    def _set_stateless_default_actions(self, key, actions):
-        valid_actions = [
-            'aws:pass', 'aws:drop', 'aws:forward_to_sfe'
-        ]
-        custom_actions = self._get_resource_value('StatelessCustomActions', dict())
-        custom_action_names = [a['ActionName'] for a in custom_actions]
+        valid_actions = ["aws:pass", "aws:drop", "aws:forward_to_sfe"]
+        custom_actions = self._get_resource_value("StatelessCustomActions", dict())
+        custom_action_names = [a["ActionName"] for a in custom_actions]
        valid_actions.extend(custom_action_names)
        return self.set_default_actions(key, actions, valid_actions)

    def set_stateless_default_actions(self, actions):
-        return self._set_stateless_default_actions('StatelessDefaultActions', actions)
+        return self._set_stateless_default_actions("StatelessDefaultActions", actions)

    def set_stateless_fragment_default_actions(self, actions):
-        return self._set_stateless_default_actions('StatelessFragmentDefaultActions', actions)
+        return self._set_stateless_default_actions("StatelessFragmentDefaultActions", actions)

    def _normalize_policy(self, policy):
        if policy is None:

@@ -1226,20 +1251,19 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
    def _normalize_policy_result(self, result):
        if result is None:
            return None
-        policy = self._normalize_policy(result.get('FirewallPolicy', None))
-        policy_metadata = self._normalize_policy_metadata(result.get('FirewallPolicyMetadata', None))
+        policy = self._normalize_policy(result.get("FirewallPolicy", None))
+        policy_metadata = self._normalize_policy_metadata(result.get("FirewallPolicyMetadata", None))
        result = dict()
        if policy:
-            result['policy'] = policy
+            result["policy"] = policy
        if policy_metadata:
-            result['policy_metadata'] = policy_metadata
+            result["policy_metadata"] = policy_metadata
        return result

    def _normalize_resource(self, resource):
        return self._normalize_policy_result(resource)

    def get_policy(self, name=None, arn=None):
-
        id_params = self._get_id_params(name=name, arn=arn)
        result = self._get_policy(**id_params)

@@ -1251,21 +1275,21 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
    def _format_custom_action(self, action):
        formatted_action = dict(
-            ActionName=action['name'],
+            ActionName=action["name"],
        )
        action_definition = dict()
-        if 'publish_metric_dimension_value' in action:
-            values = _string_list(action['publish_metric_dimension_value'])
+        if "publish_metric_dimension_value" in action:
+            values = _string_list(action["publish_metric_dimension_value"])
            dimensions = [dict(Value=v) for v in values]
-            action_definition['PublishMetricAction'] = dict(
+            action_definition["PublishMetricAction"] = dict(
                Dimensions=dimensions,
            )
        if action_definition:
-            formatted_action['ActionDefinition'] = action_definition
+            formatted_action["ActionDefinition"] = action_definition
        return formatted_action

    def _custom_action_map(self, actions):
-        return {a['ActionName']: a['ActionDefinition'] for a in actions}
+        return {a["ActionName"]: a["ActionDefinition"] for a in actions}

    def set_custom_stateless_actions(self, actions, purge_actions):
        if actions is None:

@@ -1273,9 +1297,7 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        new_action_list = [self._format_custom_action(a) for a in actions]
        new_action_map = self._custom_action_map(new_action_list)

-        existing_action_map = self._custom_action_map(
-            self._get_resource_value('StatelessCustomActions', [])
-        )
+        existing_action_map = self._custom_action_map(self._get_resource_value("StatelessCustomActions", []))
        if purge_actions:
            desired_action_map = dict()
        else:

@@ -1286,34 +1308,31 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
            return False

        action_list = [dict(ActionName=k, ActionDefinition=v) for k, v in desired_action_map.items()]
-        self._set_resource_value('StatelessCustomActions', action_list)
+        self._set_resource_value("StatelessCustomActions", action_list)

    def set_description(self, description):
-        return self._set_metadata_value('Description', description)
+        return self._set_metadata_value("Description", description)

    def _do_create_resource(self):
        metadata, resource = self._merge_changes(filter_metadata=False)
        params = metadata
        params.update(self._get_id_params())
-        params['FirewallPolicy'] = resource
+        params["FirewallPolicy"] = resource
        response = self._create_policy(**params)
        return bool(response)

    def _generate_updated_resource(self):
        metadata, resource = self._merge_changes(filter_metadata=False)
        metadata.update(self._get_id_params())
-        updated_resource = dict(
-            FirewallPolicy=resource,
-            FirewallPolicyMetadata=metadata
-        )
+        updated_resource = dict(FirewallPolicy=resource, FirewallPolicyMetadata=metadata)
        return updated_resource

    def _flush_create(self):
        # Set some defaults
-        if self._get_resource_value('StatelessDefaultActions', None) is None:
-            self._set_resource_value('StatelessDefaultActions', ['aws:forward_to_sfe'])
-        if self._get_resource_value('StatelessFragmentDefaultActions', None) is None:
-            self._set_resource_value('StatelessFragmentDefaultActions', ['aws:forward_to_sfe'])
+        if self._get_resource_value("StatelessDefaultActions", None) is None:
+            self._set_resource_value("StatelessDefaultActions", ["aws:forward_to_sfe"])
+        if self._get_resource_value("StatelessFragmentDefaultActions", None) is None:
+            self._set_resource_value("StatelessFragmentDefaultActions", ["aws:forward_to_sfe"])
        return super(NetworkFirewallPolicyManager, self)._flush_create()

    def _do_update_resource(self):

@@ -1327,7 +1346,7 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        params = metadata
        params.update(self._get_id_params())
-        params['FirewallPolicy'] = resource
+        params["FirewallPolicy"] = resource

        if not self.module.check_mode:
            response = self._update_policy(**params)

@@ -1345,13 +1364,13 @@ class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, Ba
        if not result:
            return None

-        policy = result.get('FirewallPolicy', None)
+        policy = result.get("FirewallPolicy", None)
        # During deletion, there's a phase where this will return Metadata but
        # no policy
        if policy is None:
            policy = dict()
-        metadata = result.get('FirewallPolicyMetadata', None)
+        metadata = result.get("FirewallPolicyMetadata", None)
        self._preupdate_resource = deepcopy(policy)
        self._preupdate_metadata = deepcopy(metadata)
        return dict(FirewallPolicy=policy, FirewallPolicyMetadata=metadata)
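_format_custom_action above maps the module's custom-action options onto the API's ActionDefinition shape, and set_custom_stateless_actions then diffs those structures by ActionName. A worked example, with hypothetical names and values:

action = {"name": "CountPackets", "publish_metric_dimension_value": "ExamplePackets"}

# _format_custom_action(action) would produce:
formatted = {
    "ActionName": "CountPackets",
    "ActionDefinition": {
        "PublishMetricAction": {
            "Dimensions": [{"Value": "ExamplePackets"}],
        },
    },
}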
@@ -1371,7 +1390,6 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
-
    name = None
    arn = None
    ec2_manager = None

@@ -1393,13 +1411,13 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
    def _extra_error_output(self):
        output = super(NetworkFirewallManager, self)._extra_error_output()
        if self.name:
-            output['FirewallName'] = self.name
+            output["FirewallName"] = self.name
        if self.arn:
-            output['FirewallArn'] = self.arn
+            output["FirewallArn"] = self.arn
        return output

    def _get_preupdate_arn(self):
-        return self._get_resource_value('FirewallArn')
+        return self._get_resource_value("FirewallArn")

    def _get_id_params(self, name=None, arn=None):
        if arn:

@@ -1410,11 +1428,10 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
            name = self.name
        if not name:
            # Users should never see this, but let's cover ourself
-            self.module.fail_json(msg='Firewall identifier parameters missing')
+            self.module.fail_json(msg="Firewall identifier parameters missing")
        return dict(FirewallName=name)

    def delete(self, name=None, arn=None):
-
        id_params = self._get_id_params(name=name, arn=arn)
        result = self._get_firewall(**id_params)

@@ -1424,8 +1441,8 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
        self.updated_resource = dict()

        # Firewall is already in the process of being deleted (takes time)
-        firewall_status = self._get_metadata_value('Status', '').upper()
-        if firewall_status == 'DELETING':
+        firewall_status = self._get_metadata_value("Status", "").upper()
+        if firewall_status == "DELETING":
            self._wait_for_deletion()
            return False

@@ -1433,9 +1450,10 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
            self.changed = True
            return True

-        if 'DeleteProtection' in self._resource_updates:
+        if "DeleteProtection" in self._resource_updates:
            self._update_firewall_delete_protection(
-                DeleteProtection=self._resource_updates['DeleteProtection'], **id_params,
+                DeleteProtection=self._resource_updates["DeleteProtection"],
+                **id_params,
            )

        result = self._delete_firewall(**id_params)

@@ -1446,55 +1464,54 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
    def list(self, vpc_ids=None):
        params = dict()
        if vpc_ids:
-            params['VpcIds'] = vpc_ids
+            params["VpcIds"] = vpc_ids
        firewalls = self._list_firewalls(**params)
        if not firewalls:
            return list()

-        return [f.get('FirewallArn', None) for f in firewalls]
+        return [f.get("FirewallArn", None) for f in firewalls]

    def _normalize_firewall(self, firewall):
        if firewall is None:
            return None
-        subnets = [s.get('SubnetId') for s in firewall.get('SubnetMappings', [])]
+        subnets = [s.get("SubnetId") for s in firewall.get("SubnetMappings", [])]
        firewall = self._normalize_boto3_resource(firewall, add_tags=True)
-        firewall['subnets'] = subnets
+        firewall["subnets"] = subnets
        return firewall

    def _normalize_sync_state_config(self, policy):
        return self._normalize_boto3_resource(policy)

    def _normalize_sync_state(self, state):
-        config = {k: self._normalize_sync_state_config(v) for k, v in state.pop('Config', {}).items()}
+        config = {k: self._normalize_sync_state_config(v) for k, v in state.pop("Config", {}).items()}
        state = self._normalize_boto3_resource(state)
-        state['config'] = config or {}
+        state["config"] = config or {}
        return state

    def _normalize_firewall_metadata(self, firewall_metadata):
        if firewall_metadata is None:
            return None
-        states = {k: self._normalize_sync_state(v) for k, v in firewall_metadata.pop('SyncStates', {}).items()}
+        states = {k: self._normalize_sync_state(v) for k, v in firewall_metadata.pop("SyncStates", {}).items()}
        metadata = self._normalize_boto3_resource(firewall_metadata, add_tags=False)
-        metadata['sync_states'] = states or {}
+        metadata["sync_states"] = states or {}
        return metadata

    def _normalize_firewall_result(self, result):
        if result is None:
            return None
-        firewall = self._normalize_firewall(result.get('Firewall', None))
-        firewall_metadata = self._normalize_firewall_metadata(result.get('FirewallMetadata', None))
+        firewall = self._normalize_firewall(result.get("Firewall", None))
+        firewall_metadata = self._normalize_firewall_metadata(result.get("FirewallMetadata", None))
        result = camel_dict_to_snake_dict(result)
        if firewall:
-            result['firewall'] = firewall
+            result["firewall"] = firewall
        if firewall_metadata:
-            result['firewall_metadata'] = firewall_metadata
+            result["firewall_metadata"] = firewall_metadata
        return result

    def _normalize_resource(self, resource):
        return self._normalize_firewall_result(resource)

    def get_firewall(self, name=None, arn=None):
-
        id_params = self._get_id_params(name=name, arn=arn)
        result = self._get_firewall(**id_params)

@@ -1506,8 +1523,8 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
    @property
    def _subnets(self):
-        subnet_mappings = self._get_resource_value('SubnetMappings', [])
-        subnets = [s.get('SubnetId') for s in subnet_mappings]
+        subnet_mappings = self._get_resource_value("SubnetMappings", [])
+        subnets = [s.get("SubnetId") for s in subnet_mappings]
        return subnets

    def _subnets_to_vpc(self, subnets, subnet_details=None):

@@ -1515,11 +1532,13 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
            return None
        if not subnet_details:
            subnet_details = self.ec2_manager._describe_subnets(SubnetIds=list(subnets))
-        vpcs = [s.get('VpcId') for s in subnet_details]
+        vpcs = [s.get("VpcId") for s in subnet_details]
        if len(set(vpcs)) > 1:
            self.module.fail_json(
-                msg='Firewall subnets may only be in one VPC, multiple VPCs found',
-                vpcs=list(set(vpcs)), subnets=subnet_details)
+                msg="Firewall subnets may only be in one VPC, multiple VPCs found",
+                vpcs=list(set(vpcs)),
+                subnets=subnet_details,
+            )
        return vpcs[0]

    def _format_subnet_mapping(self, subnets):

@@ -1535,7 +1554,7 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
        if not results:
            return dict()

-        policy_cache = {p.get('Name', None): p.get('Arn', None) for p in results}
+        policy_cache = {p.get("Name", None): p.get("Arn", None) for p in results}
        self._policy_list_cache = policy_cache
        return policy_cache

@@ -1545,20 +1564,26 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
        """
        arn = None
        # : is only valid in ARNs
-        if ':' in name:
+        if ":" in name:
            arn = name
        else:
            arn = self._policy_name_cache.get(name, None)
        if not arn:
-            self.module.fail_json('Unable to fetch ARN for policy', name=name,
-                                  policy_name_cache=self._policy_name_cache)
+            self.module.fail_json(
+                "Unable to fetch ARN for policy", name=name, policy_name_cache=self._policy_name_cache
+            )
        arn_info = parse_aws_arn(arn)
        if not arn_info:
-            self.module.fail_json('Unable to parse ARN for policy', arn=arn, arn_info=arn_info)
-        arn_type = arn_info['resource'].split('/')[0]
-        if arn_type != 'firewall-policy':
-            self.module.fail_json('Policy ARN not of expected resource type', name=name,
-                                  arn=arn, expected_type='firewall-policy', found_type=arn_type)
+            self.module.fail_json("Unable to parse ARN for policy", arn=arn, arn_info=arn_info)
+        arn_type = arn_info["resource"].split("/")[0]
+        if arn_type != "firewall-policy":
+            self.module.fail_json(
+                "Policy ARN not of expected resource type",
+                name=name,
+                arn=arn,
+                expected_type="firewall-policy",
+                found_type=arn_type,
+            )

        return arn

@@ -1569,15 +1594,15 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
        # Because the canonicalization of a non-ARN policy name will require an API call,
        # try comparing the current name to the policy name we've been passed.
        # If they match we don't need to perform the lookup.
-        current_policy = self._get_resource_value('FirewallPolicyArn', None)
+        current_policy = self._get_resource_value("FirewallPolicyArn", None)
        if current_policy:
            arn_info = parse_aws_arn(current_policy)
-            current_name = arn_info['resource'].split('/')[-1]
+            current_name = arn_info["resource"].split("/")[-1]
            if current_name == policy:
                return False

        policy = self._canonicalize_policy(policy)
-        return self._set_resource_value('FirewallPolicyArn', policy)
+        return self._set_resource_value("FirewallPolicyArn", policy)

    def set_subnets(self, subnets, purge=True):
        if subnets is None:

@@ -1593,31 +1618,31 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
        subnet_details = self.ec2_manager._describe_subnets(SubnetIds=list(desired_subnets))
        vpc = self._subnets_to_vpc(desired_subnets, subnet_details)
-        self._set_resource_value('VpcId', vpc, description='firewall VPC', immutable=True)
+        self._set_resource_value("VpcId", vpc, description="firewall VPC", immutable=True)

-        azs = [s.get('AvailabilityZoneId') for s in subnet_details]
+        azs = [s.get("AvailabilityZoneId") for s in subnet_details]
        if len(azs) != len(set(azs)):
            self.module.fail_json(
-                msg='Only one subnet per availability zone may set.',
-                availability_zones=azs, subnets=subnet_details)
+                msg="Only one subnet per availability zone may set.", availability_zones=azs, subnets=subnet_details
+            )

        subnets_to_add = list(desired_subnets.difference(current_subnets))
        subnets_to_remove = list(current_subnets.difference(desired_subnets))
        self._subnet_updates = dict(add=subnets_to_add, remove=subnets_to_remove)
-        self._set_resource_value('SubnetMappings', self._format_subnet_mapping(desired_subnets))
+        self._set_resource_value("SubnetMappings", self._format_subnet_mapping(desired_subnets))
        return True

    def set_policy_change_protection(self, protection):
-        return self._set_resource_value('FirewallPolicyChangeProtection', protection)
+        return self._set_resource_value("FirewallPolicyChangeProtection", protection)

    def set_subnet_change_protection(self, protection):
-        return self._set_resource_value('SubnetChangeProtection', protection)
+        return self._set_resource_value("SubnetChangeProtection", protection)

    def set_delete_protection(self, protection):
-        return self._set_resource_value('DeleteProtection', protection)
+        return self._set_resource_value("DeleteProtection", protection)

    def set_description(self, description):
-        return self._set_resource_value('Description', description)
+        return self._set_resource_value("Description", description)

    def _do_create_resource(self):
        metadata, resource = self._merge_changes(filter_metadata=False)

@@ -1630,10 +1655,7 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
    def _generate_updated_resource(self):
        metadata, resource = self._merge_changes(filter_metadata=False)
        resource.update(self._get_id_params())
-        updated_resource = dict(
-            Firewall=resource,
-            FirewallMetadata=metadata
-        )
+        updated_resource = dict(Firewall=resource, FirewallMetadata=metadata)
        return updated_resource

    def _flush_create(self):

@@ -1655,59 +1677,60 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
        # There's no tool for 'bulk' updates, we need to iterate through these
        # one at a time...
-        if 'Description' in resource_updates:
+        if "Description" in resource_updates:
            self._update_firewall_description(
-                Description=resource_updates['Description'], **id_params,
+                Description=resource_updates["Description"],
+                **id_params,
            )
-        if 'DeleteProtection' in resource_updates:
+        if "DeleteProtection" in resource_updates:
            self._update_firewall_delete_protection(
-                DeleteProtection=resource_updates['DeleteProtection'], **id_params,
+                DeleteProtection=resource_updates["DeleteProtection"],
+                **id_params,
            )

        # Disable Change Protection...
        # When disabling change protection, do so *before* making changes
-        if 'FirewallPolicyChangeProtection' in resource_updates:
-            if not self._get_resource_value('FirewallPolicyChangeProtection'):
+        if "FirewallPolicyChangeProtection" in resource_updates:
+            if not self._get_resource_value("FirewallPolicyChangeProtection"):
                self._update_firewall_policy_change_protection(
-                    FirewallPolicyChangeProtection=resource_updates['FirewallPolicyChangeProtection'], **id_params,
+                    FirewallPolicyChangeProtection=resource_updates["FirewallPolicyChangeProtection"],
+                    **id_params,
                )
-        if 'SubnetChangeProtection' in resource_updates:
-            if not self._get_resource_value('SubnetChangeProtection'):
+        if "SubnetChangeProtection" in resource_updates:
+            if not self._get_resource_value("SubnetChangeProtection"):
                self._update_subnet_change_protection(
-                    SubnetChangeProtection=resource_updates['SubnetChangeProtection'], **id_params,
+                    SubnetChangeProtection=resource_updates["SubnetChangeProtection"],
+                    **id_params,
                )

        # General Changes
-        if 'SubnetMappings' in resource_updates:
+        if "SubnetMappings" in resource_updates:
            self._slow_start_change = True
-            subnets_to_add = self._subnet_updates.get('add', None)
-            subnets_to_remove = self._subnet_updates.get('remove', None)
+            subnets_to_add = self._subnet_updates.get("add", None)
+            subnets_to_remove = self._subnet_updates.get("remove", None)
            if subnets_to_remove:
-                self._disassociate_subnets(
-                    SubnetIds=subnets_to_remove, **id_params)
+                self._disassociate_subnets(SubnetIds=subnets_to_remove, **id_params)
            if subnets_to_add:
                subnets_to_add = self._format_subnet_mapping(subnets_to_add)
-                self._associate_subnets(
-                    SubnetMappings=subnets_to_add, **id_params)
+                self._associate_subnets(SubnetMappings=subnets_to_add, **id_params)

-        if 'FirewallPolicyArn' in resource_updates:
+        if "FirewallPolicyArn" in resource_updates:
            self._slow_start_change = True
-            self._associate_firewall_policy(
-                FirewallPolicyArn=resource_updates['FirewallPolicyArn'],
-                **id_params
-            )
+            self._associate_firewall_policy(FirewallPolicyArn=resource_updates["FirewallPolicyArn"], **id_params)

        # Enable Change Protection.
        # When enabling change protection, do so *after* making changes
-        if 'FirewallPolicyChangeProtection' in resource_updates:
-            if self._get_resource_value('FirewallPolicyChangeProtection'):
+        if "FirewallPolicyChangeProtection" in resource_updates:
+            if self._get_resource_value("FirewallPolicyChangeProtection"):
                self._update_firewall_policy_change_protection(
-                    FirewallPolicyChangeProtection=resource_updates['FirewallPolicyChangeProtection'], **id_params,
+                    FirewallPolicyChangeProtection=resource_updates["FirewallPolicyChangeProtection"],
+                    **id_params,
                )
-        if 'SubnetChangeProtection' in resource_updates:
-            if self._get_resource_value('SubnetChangeProtection'):
+        if "SubnetChangeProtection" in resource_updates:
+            if self._get_resource_value("SubnetChangeProtection"):
                self._update_subnet_change_protection(
-                    SubnetChangeProtection=resource_updates['SubnetChangeProtection'], **id_params,
+                    SubnetChangeProtection=resource_updates["SubnetChangeProtection"],
+                    **id_params,
                )

        return True

@@ -1724,8 +1747,8 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
        if not result:
            return None

-        firewall = result.get('Firewall', None)
-        metadata = result.get('FirewallMetadata', None)
+        firewall = result.get("Firewall", None)
+        metadata = result.get("FirewallMetadata", None)
        self._preupdate_resource = deepcopy(firewall)
        self._preupdate_metadata = deepcopy(metadata)
        return dict(Firewall=firewall, FirewallMetadata=metadata)

@@ -1756,7 +1779,7 @@ class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetwo
    # Unlike RuleGroups and Policies for some reason Firewalls have the tags set
    # directly on the resource.
    def _set_tag_values(self, desired_tags):
-        return self._set_resource_value('Tags', ansible_dict_to_boto3_tag_list(desired_tags))
+        return self._set_resource_value("Tags", ansible_dict_to_boto3_tag_list(desired_tags))

    def _get_tag_values(self):
-        return self._get_resource_value('Tags', [])
+        return self._get_resource_value("Tags", [])
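set_subnets above enforces two invariants before touching the firewall: every subnet must belong to the same VPC, and at most one subnet may sit in each availability zone. A standalone sketch of those checks against the EC2 API (the subnet IDs are hypothetical):

import boto3

ec2 = boto3.client("ec2")
subnet_ids = ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"]  # hypothetical

details = ec2.describe_subnets(SubnetIds=subnet_ids)["Subnets"]
vpcs = {s["VpcId"] for s in details}
azs = [s["AvailabilityZoneId"] for s in details]

if len(vpcs) > 1:
    raise ValueError(f"Firewall subnets span multiple VPCs: {sorted(vpcs)}")
if len(azs) != len(set(azs)):
    raise ValueError("Only one firewall subnet per availability zone is allowed")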
diff --git a/ansible_collections/community/aws/plugins/module_utils/opensearch.py b/ansible_collections/community/aws/plugins/module_utils/opensearch.py
index 8189378e5..13d90bd6a 100644
--- a/ansible_collections/community/aws/plugins/module_utils/opensearch.py
+++ b/ansible_collections/community/aws/plugins/module_utils/opensearch.py
@@ -1,31 +1,26 @@
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
+# -*- coding: utf-8 -*-

-__metaclass__ = type
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from copy import deepcopy
import datetime
import functools
import time
+from copy import deepcopy

try:
    import botocore
except ImportError:
    pass  # caught by AnsibleAWSModule

-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
-    ansible_dict_to_boto3_tag_list,
-    camel_dict_to_snake_dict,
-    compare_aws_tags,
-)
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import (
-    boto3_tag_list_to_ansible_dict,
-)
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.six import string_types

+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+

def get_domain_status(client, module, domain_name):
    """

@@ -35,8 +30,11 @@ def get_domain_status(client, module, domain_name):
        response = client.describe_domain(DomainName=domain_name)
    except is_boto3_error_code("ResourceNotFoundException"):
        return None
-    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg="Couldn't get domain {0}".format(domain_name))
+    except (
+        botocore.exceptions.BotoCoreError,
+        botocore.exceptions.ClientError,
+    ) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg=f"Couldn't get domain {domain_name}")
    return response["DomainStatus"]

@@ -54,13 +52,17 @@ def get_domain_config(client, module, domain_name):
        response = client.describe_domain_config(DomainName=domain_name)
    except is_boto3_error_code("ResourceNotFoundException"):
        return (None, None)
-    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg="Couldn't get domain {0}".format(domain_name))
+    except (
+        botocore.exceptions.BotoCoreError,
+        botocore.exceptions.ClientError,
+    ) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg=f"Couldn't get domain {domain_name}")
    domain_config = {}
    arn = None
    if response is not None:
        for k in response["DomainConfig"]:
-            domain_config[k] = response["DomainConfig"][k]["Options"]
+            if "Options" in response["DomainConfig"][k]:
+                domain_config[k] = response["DomainConfig"][k]["Options"]
        domain_config["DomainName"] = domain_name
        # If ES cluster is attached to the Internet, the "VPCOptions" property is not present.
        if "VPCOptions" in domain_config:

@@ -93,13 +95,9 @@ def normalize_opensearch(client, module, domain):
    convert the attributes from camel case to snake case, and return the object.
    """
    try:
-        domain["Tags"] = boto3_tag_list_to_ansible_dict(
-            client.list_tags(ARN=domain["ARN"], aws_retry=True)["TagList"]
-        )
+        domain["Tags"] = boto3_tag_list_to_ansible_dict(client.list_tags(ARN=domain["ARN"], aws_retry=True)["TagList"])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(
-            e, "Couldn't get tags for domain %s" % domain["domain_name"]
-        )
+        module.fail_json_aws(e, f"Couldn't get tags for domain {domain['domain_name']}")
    except KeyError:
        module.fail_json(msg=str(domain))

@@ -133,16 +131,14 @@ def wait_for_domain_status(client, module, domain_name, waiter_name):
            return
        time.sleep(15)
    # Timeout occured.
-    module.fail_json(
-        msg=f"Timeout waiting for wait state '{waiter_name}'. {status_msg}"
-    )
+    module.fail_json(msg=f"Timeout waiting for wait state '{waiter_name}'. {status_msg}")

def parse_version(engine_version):
-    '''
+    """
    Parse the engine version, which should be Elasticsearch_X.Y or OpenSearch_X.Y
    Return dict { 'engine_type': engine_type, 'major': major, 'minor': minor }
-    '''
+    """
    version = engine_version.split("_")
    if len(version) != 2:
        return None

@@ -150,19 +146,19 @@ def parse_version(engine_version):
    if len(semver) != 2:
        return None
    engine_type = version[0]
-    if engine_type not in ['Elasticsearch', 'OpenSearch']:
+    if engine_type not in ["Elasticsearch", "OpenSearch"]:
        return None
    if not (semver[0].isdigit() and semver[1].isdigit()):
        return None
    major = int(semver[0])
    minor = int(semver[1])
-    return {'engine_type': engine_type, 'major': major, 'minor': minor}
+    return {"engine_type": engine_type, "major": major, "minor": minor}

def compare_domain_versions(version1, version2):
    supported_engines = {
-        'Elasticsearch': 1,
-        'OpenSearch': 2,
+        "Elasticsearch": 1,
+        "OpenSearch": 2,
    }
    if isinstance(version1, string_types):
        version1 = parse_version(version1)

@@ -174,21 +170,21 @@ def compare_domain_versions(version1, version2):
        return 1
    elif version1 is None and version2 is None:
        return 0
-    e1 = supported_engines.get(version1.get('engine_type'))
-    e2 = supported_engines.get(version2.get('engine_type'))
+    e1 = supported_engines.get(version1.get("engine_type"))
+    e2 = supported_engines.get(version2.get("engine_type"))
    if e1 < e2:
        return -1
    elif e1 > e2:
        return 1
    else:
-        if version1.get('major') < version2.get('major'):
+        if version1.get("major") < version2.get("major"):
            return -1
-        elif version1.get('major') > version2.get('major'):
+        elif version1.get("major") > version2.get("major"):
            return 1
        else:
-            if version1.get('minor') < version2.get('minor'):
+            if version1.get("minor") < version2.get("minor"):
                return -1
-            elif version1.get('minor') > version2.get('minor'):
+            elif version1.get("minor") > version2.get("minor"):
                return 1
            else:
                return 0

@@ -208,22 +204,15 @@ def get_target_increment_version(client, module, domain_name, target_version):
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(
            e,
-            msg="Couldn't get compatible versions for domain {0}".format(
-                domain_name),
+            msg=f"Couldn't get compatible versions for domain {domain_name}",
        )
-    compat = api_compatible_versions.get('CompatibleVersions')
+    compat = api_compatible_versions.get("CompatibleVersions")
    if compat is None:
-        module.fail_json(
-            "Unable to determine list of compatible versions",
-            compatible_versions=api_compatible_versions)
+        module.fail_json("Unable to determine list of compatible versions", compatible_versions=api_compatible_versions)
    if len(compat) == 0:
-        module.fail_json(
-            "Unable to determine list of compatible versions",
-            compatible_versions=api_compatible_versions)
+        module.fail_json("Unable to determine list of compatible versions", compatible_versions=api_compatible_versions)
    if compat[0].get("TargetVersions") is None:
-        module.fail_json(
-            "No compatible versions found",
-            compatible_versions=api_compatible_versions)
+        module.fail_json("No compatible versions found", compatible_versions=api_compatible_versions)
    compatible_versions = []
    for v in compat[0].get("TargetVersions"):
        if target_version == v:

@@ -248,9 +237,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
    changed = bool(tags_to_add or tags_to_remove)
    if tags_to_add:
        if module.check_mode:
-            module.exit_json(
-                changed=True, msg="Would have added tags to domain if not in check mode"
-            )
+            module.exit_json(changed=True, msg="Would have added tags to domain if not in check mode")
        try:
            client.add_tags(
                ARN=resource_arn,

@@ -260,21 +247,15 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
            botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError,
        ) as e:
-            module.fail_json_aws(
-                e, "Couldn't add tags to domain {0}".format(resource_arn)
-            )
+            module.fail_json_aws(e, f"Couldn't add tags to domain {resource_arn}")
    if tags_to_remove:
        if module.check_mode:
-            module.exit_json(
-                changed=True, msg="Would have removed tags if not in check mode"
-            )
+            module.exit_json(changed=True, msg="Would have removed tags if not in check mode")
        try:
            client.remove_tags(ARN=resource_arn, TagKeys=tags_to_remove)
        except (
            botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError,
        ) as e:
-            module.fail_json_aws(
-                e, "Couldn't remove tags from domain {0}".format(resource_arn)
-            )
+            module.fail_json_aws(e, f"Couldn't remove tags from domain {resource_arn}")
    return changed
module.exit_json(changed=True, msg="Would have added tags to domain if not in check mode") try: client.add_tags( ARN=resource_arn, @@ -260,21 +247,15 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws( - e, "Couldn't add tags to domain {0}".format(resource_arn) - ) + module.fail_json_aws(e, f"Couldn't add tags to domain {resource_arn}") if tags_to_remove: if module.check_mode: - module.exit_json( - changed=True, msg="Would have removed tags if not in check mode" - ) + module.exit_json(changed=True, msg="Would have removed tags if not in check mode") try: client.remove_tags(ARN=resource_arn, TagKeys=tags_to_remove) except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws( - e, "Couldn't remove tags from domain {0}".format(resource_arn) - ) + module.fail_json_aws(e, f"Couldn't remove tags from domain {resource_arn}") return changed diff --git a/ansible_collections/community/aws/plugins/module_utils/sns.py b/ansible_collections/community/aws/plugins/module_utils/sns.py index 44327d493..3c4e2a436 100644 --- a/ansible_collections/community/aws/plugins/module_utils/sns.py +++ b/ansible_collections/community/aws/plugins/module_utils/sns.py @@ -1,53 +1,57 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -import re import copy +import re try: import botocore except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags @AWSRetry.jittered_backoff() def _list_topics_with_backoff(client): - paginator = client.get_paginator('list_topics') - return paginator.paginate().build_full_result()['Topics'] + paginator = client.get_paginator("list_topics") + return paginator.paginate().build_full_result()["Topics"] -@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["NotFound"]) def _list_topic_subscriptions_with_backoff(client, topic_arn): - paginator = client.get_paginator('list_subscriptions_by_topic') - return paginator.paginate(TopicArn=topic_arn).build_full_result()['Subscriptions'] + paginator = client.get_paginator("list_subscriptions_by_topic") + return 
paginator.paginate(TopicArn=topic_arn).build_full_result()["Subscriptions"] -@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["NotFound"]) def _list_subscriptions_with_backoff(client): - paginator = client.get_paginator('list_subscriptions') - return paginator.paginate().build_full_result()['Subscriptions'] + paginator = client.get_paginator("list_subscriptions") + return paginator.paginate().build_full_result()["Subscriptions"] def list_topic_subscriptions(client, module, topic_arn): try: return _list_topic_subscriptions_with_backoff(client, topic_arn) - except is_boto3_error_code('AuthorizationError'): + except is_boto3_error_code("AuthorizationError"): try: # potentially AuthorizationError when listing subscriptions for third party topic - return [sub for sub in _list_subscriptions_with_backoff(client) - if sub['TopicArn'] == topic_arn] + return [sub for sub in _list_subscriptions_with_backoff(client) if sub["TopicArn"] == topic_arn] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % topic_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % topic_arn) + module.fail_json_aws(e, msg=f"Couldn't get subscriptions list for topic {topic_arn}") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Couldn't get subscriptions list for topic {topic_arn}") def list_topics(client, module): @@ -55,13 +59,13 @@ def list_topics(client, module): topics = _list_topics_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get topic list") - return [t['TopicArn'] for t in topics] + return [t["TopicArn"] for t in topics] def topic_arn_lookup(client, module, name): # topic names cannot have colons, so this captures the full topic name all_topics = list_topics(client, module) - lookup_topic = ':%s' % name + lookup_topic = f":{name}" for topic in all_topics: if topic.endswith(lookup_topic): return topic @@ -72,13 +76,13 @@ def compare_delivery_policies(policy_a, policy_b): _policy_b = copy.deepcopy(policy_b) # AWS automatically injects disableSubscriptionOverrides if you set an # http policy - if 'http' in policy_a: - if 'disableSubscriptionOverrides' not in policy_a['http']: - _policy_a['http']['disableSubscriptionOverrides'] = False - if 'http' in policy_b: - if 'disableSubscriptionOverrides' not in policy_b['http']: - _policy_b['http']['disableSubscriptionOverrides'] = False - comparison = (_policy_a != _policy_b) + if "http" in policy_a: + if "disableSubscriptionOverrides" not in policy_a["http"]: + _policy_a["http"]["disableSubscriptionOverrides"] = False + if "http" in policy_b: + if "disableSubscriptionOverrides" not in policy_b["http"]: + _policy_b["http"]["disableSubscriptionOverrides"] = False + comparison = _policy_a != _policy_b return comparison @@ -86,15 +90,15 @@ def canonicalize_endpoint(protocol, endpoint): # AWS SNS expects phone numbers in # and canonicalizes to E.164 format # See <https://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html> - if protocol == 'sms': - return re.sub('[^0-9+]*', '', endpoint) + if protocol == "sms": + return re.sub("[^0-9+]*", "", 
endpoint) return endpoint def get_tags(client, module, topic_arn): try: - return boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceArn=topic_arn)['Tags']) - except is_boto3_error_code('AuthorizationError'): + return boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceArn=topic_arn)["Tags"]) + except is_boto3_error_code("AuthorizationError"): module.warn("Permission denied accessing tags") return {} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -102,52 +106,53 @@ def get_tags(client, module, topic_arn): def get_info(connection, module, topic_arn): - name = module.params.get('name') - topic_type = module.params.get('topic_type') - state = module.params.get('state') - subscriptions = module.params.get('subscriptions') - purge_subscriptions = module.params.get('purge_subscriptions') - content_based_deduplication = module.params.get('content_based_deduplication') - subscriptions_existing = module.params.get('subscriptions_existing', []) - subscriptions_deleted = module.params.get('subscriptions_deleted', []) - subscriptions_added = module.params.get('subscriptions_added', []) - subscriptions_added = module.params.get('subscriptions_added', []) - topic_created = module.params.get('topic_created', False) - topic_deleted = module.params.get('topic_deleted', False) - attributes_set = module.params.get('attributes_set', []) + name = module.params.get("name") + topic_type = module.params.get("topic_type") + state = module.params.get("state") + subscriptions = module.params.get("subscriptions") + purge_subscriptions = module.params.get("purge_subscriptions") + content_based_deduplication = module.params.get("content_based_deduplication") + subscriptions_existing = module.params.get("subscriptions_existing", []) + subscriptions_deleted = module.params.get("subscriptions_deleted", []) + subscriptions_added = module.params.get("subscriptions_added", []) + subscriptions_added = module.params.get("subscriptions_added", []) + topic_created = module.params.get("topic_created", False) + topic_deleted = module.params.get("topic_deleted", False) + attributes_set = module.params.get("attributes_set", []) check_mode = module.check_mode info = { - 'name': name, - 'topic_type': topic_type, - 'state': state, - 'subscriptions_new': subscriptions, - 'subscriptions_existing': subscriptions_existing, - 'subscriptions_deleted': subscriptions_deleted, - 'subscriptions_added': subscriptions_added, - 'subscriptions_purge': purge_subscriptions, - 'content_based_deduplication': content_based_deduplication, - 'check_mode': check_mode, - 'topic_created': topic_created, - 'topic_deleted': topic_deleted, - 'attributes_set': attributes_set, + "name": name, + "topic_type": topic_type, + "state": state, + "subscriptions_new": subscriptions, + "subscriptions_existing": subscriptions_existing, + "subscriptions_deleted": subscriptions_deleted, + "subscriptions_added": subscriptions_added, + "subscriptions_purge": purge_subscriptions, + "content_based_deduplication": content_based_deduplication, + "check_mode": check_mode, + "topic_created": topic_created, + "topic_deleted": topic_deleted, + "attributes_set": attributes_set, } - if state != 'absent': + if state != "absent": if topic_arn in list_topics(connection, module): - info.update(camel_dict_to_snake_dict(connection.get_topic_attributes(TopicArn=topic_arn)['Attributes'])) - info['delivery_policy'] = info.pop('effective_delivery_policy') - info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in 
list_topic_subscriptions(connection, module, topic_arn)] + info.update(camel_dict_to_snake_dict(connection.get_topic_attributes(TopicArn=topic_arn)["Attributes"])) + info["delivery_policy"] = info.pop("effective_delivery_policy") + info["subscriptions"] = [ + camel_dict_to_snake_dict(sub) for sub in list_topic_subscriptions(connection, module, topic_arn) + ] info["tags"] = get_tags(connection, module, topic_arn) return info def update_tags(client, module, topic_arn): - - if module.params.get('tags') is None: + if module.params.get("tags") is None: return False existing_tags = get_tags(client, module, topic_arn) - to_update, to_delete = compare_aws_tags(existing_tags, module.params['tags'], module.params['purge_tags']) + to_update, to_delete = compare_aws_tags(existing_tags, module.params["tags"], module.params["purge_tags"]) if not bool(to_delete or to_update): return False @@ -157,8 +162,7 @@ def update_tags(client, module, topic_arn): if to_update: try: - client.tag_resource(ResourceArn=topic_arn, - Tags=ansible_dict_to_boto3_tag_list(to_update)) + client.tag_resource(ResourceArn=topic_arn, Tags=ansible_dict_to_boto3_tag_list(to_update)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't add tags to topic") if to_delete: diff --git a/ansible_collections/community/aws/plugins/module_utils/transitgateway.py b/ansible_collections/community/aws/plugins/module_utils/transitgateway.py index 3ec198abd..5f0e934d1 100644 --- a/ansible_collections/community/aws/plugins/module_utils/transitgateway.py +++ b/ansible_collections/community/aws/plugins/module_utils/transitgateway.py @@ -1,14 +1,13 @@ +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - from copy import deepcopy -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list from ansible_collections.community.aws.plugins.module_utils.ec2 import BaseEc2Manager from ansible_collections.community.aws.plugins.module_utils.ec2 import Boto3Mixin @@ -22,21 +21,43 @@ class TgwWaiterFactory(Ec2WaiterFactory): # split the TGW waiters so we can keep them close to everything else. 
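        # Each entry below follows botocore's standard waiter model: poll the
        # named operation every `delay` seconds, up to `maxAttempts` times,
        # evaluating each acceptor's JMESPath `argument` against the response
        # until a matcher moves the waiter into its `state` (success/retry).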
tgw_data = dict( tgw_attachment_available=dict( - operation='DescribeTransitGatewayAttachments', - delay=5, maxAttempts=120, + operation="DescribeTransitGatewayAttachments", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='success', matcher='pathAll', expected='available', argument='TransitGatewayAttachments[].State'), - ] + dict( + state="success", + matcher="pathAll", + expected="available", + argument="TransitGatewayAttachments[].State", + ), + ], ), tgw_attachment_deleted=dict( - operation='DescribeTransitGatewayAttachments', - delay=5, maxAttempts=120, + operation="DescribeTransitGatewayAttachments", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='retry', matcher='pathAll', expected='deleting', argument='TransitGatewayAttachments[].State'), - dict(state='success', matcher='pathAll', expected='deleted', argument='TransitGatewayAttachments[].State'), - dict(state='success', matcher='path', expected=True, argument='length(TransitGatewayAttachments[]) == `0`'), - dict(state='success', matcher='error', expected='InvalidRouteTableID.NotFound'), - ] + dict( + state="retry", + matcher="pathAll", + expected="deleting", + argument="TransitGatewayAttachments[].State", + ), + dict( + state="success", + matcher="pathAll", + expected="deleted", + argument="TransitGatewayAttachments[].State", + ), + dict( + state="success", + matcher="path", + expected=True, + argument="length(TransitGatewayAttachments[]) == `0`", + ), + dict(state="success", matcher="error", expected="InvalidRouteTableID.NotFound"), + ], ), ) data.update(tgw_data) @@ -52,40 +73,40 @@ class TGWAttachmentBoto3Mixin(Boto3Mixin): # retry - retries the full fetch, but better than simply giving up. @AWSRetry.jittered_backoff() def _paginated_describe_transit_gateway_vpc_attachments(self, **params): - paginator = self.client.get_paginator('describe_transit_gateway_vpc_attachments') + paginator = self.client.get_paginator("describe_transit_gateway_vpc_attachments") return paginator.paginate(**params).build_full_result() - @Boto3Mixin.aws_error_handler('describe transit gateway attachments') + @Boto3Mixin.aws_error_handler("describe transit gateway attachments") def _describe_vpc_attachments(self, **params): result = self._paginated_describe_transit_gateway_vpc_attachments(**params) - return result.get('TransitGatewayVpcAttachments', None) + return result.get("TransitGatewayVpcAttachments", None) - @Boto3Mixin.aws_error_handler('create transit gateway attachment') + @Boto3Mixin.aws_error_handler("create transit gateway attachment") def _create_vpc_attachment(self, **params): result = self.client.create_transit_gateway_vpc_attachment(aws_retry=True, **params) - return result.get('TransitGatewayVpcAttachment', None) + return result.get("TransitGatewayVpcAttachment", None) - @Boto3Mixin.aws_error_handler('modify transit gateway attachment') + @Boto3Mixin.aws_error_handler("modify transit gateway attachment") def _modify_vpc_attachment(self, **params): result = self.client.modify_transit_gateway_vpc_attachment(aws_retry=True, **params) - return result.get('TransitGatewayVpcAttachment', None) + return result.get("TransitGatewayVpcAttachment", None) - @Boto3Mixin.aws_error_handler('delete transit gateway attachment') + @Boto3Mixin.aws_error_handler("delete transit gateway attachment") def _delete_vpc_attachment(self, **params): try: result = self.client.delete_transit_gateway_vpc_attachment(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): 
return None - return result.get('TransitGatewayVpcAttachment', None) + return result.get("TransitGatewayVpcAttachment", None) - @Boto3Mixin.aws_error_handler('transit gateway attachment to finish deleting') + @Boto3Mixin.aws_error_handler("transit gateway attachment to finish deleting") def _wait_tgw_attachment_deleted(self, **params): - waiter = self.tgw_waiter_factory.get_waiter('tgw_attachment_deleted') + waiter = self.tgw_waiter_factory.get_waiter("tgw_attachment_deleted") waiter.wait(**params) - @Boto3Mixin.aws_error_handler('transit gateway attachment to become available') + @Boto3Mixin.aws_error_handler("transit gateway attachment to become available") def _wait_tgw_attachment_available(self, **params): - waiter = self.tgw_waiter_factory.get_waiter('tgw_attachment_available') + waiter = self.tgw_waiter_factory.get_waiter("tgw_attachment_available") waiter.wait(**params) def _normalize_tgw_attachment(self, rtb): @@ -104,11 +125,10 @@ class TGWAttachmentBoto3Mixin(Boto3Mixin): class BaseTGWManager(BaseEc2Manager): - - @Boto3Mixin.aws_error_handler('connect to AWS') - def _create_client(self, client_name='ec2'): - if client_name == 'ec2': - error_codes = ['IncorrectState'] + @Boto3Mixin.aws_error_handler("connect to AWS") + def _create_client(self, client_name="ec2"): + if client_name == "ec2": + error_codes = ["IncorrectState"] else: error_codes = [] @@ -120,8 +140,7 @@ class BaseTGWManager(BaseEc2Manager): class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager): - - TAG_RESOURCE_TYPE = 'transit-gateway-attachment' + TAG_RESOURCE_TYPE = "transit-gateway-attachment" def __init__(self, module, id=None): self._subnet_updates = dict() @@ -132,7 +151,7 @@ class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager id = self.resource_id if not id: # Users should never see this, but let's cover ourself - self.module.fail_json(msg='Attachment identifier parameter missing') + self.module.fail_json(msg="Attachment identifier parameter missing") if id_list: return dict(TransitGatewayAttachmentIds=[id]) @@ -141,18 +160,18 @@ class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager def _extra_error_output(self): output = super(TransitGatewayVpcAttachmentManager, self)._extra_error_output() if self.resource_id: - output['TransitGatewayAttachmentId'] = self.resource_id + output["TransitGatewayAttachmentId"] = self.resource_id return output def _filter_immutable_resource_attributes(self, resource): resource = super(TransitGatewayVpcAttachmentManager, self)._filter_immutable_resource_attributes(resource) - resource.pop('TransitGatewayId', None) - resource.pop('VpcId', None) - resource.pop('VpcOwnerId', None) - resource.pop('State', None) - resource.pop('SubnetIds', None) - resource.pop('CreationTime', None) - resource.pop('Tags', None) + resource.pop("TransitGatewayId", None) + resource.pop("VpcId", None) + resource.pop("VpcOwnerId", None) + resource.pop("State", None) + resource.pop("SubnetIds", None) + resource.pop("CreationTime", None) + resource.pop("Tags", None) return resource def _set_option(self, name, value): @@ -160,36 +179,36 @@ class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager return False # For now VPC Attachment options are all enable/disable if value: - value = 'enable' + value = "enable" else: - value = 'disable' + value = "disable" - options = deepcopy(self._preupdate_resource.get('Options', dict())) - options.update(self._resource_updates.get('Options', dict())) + options = 
deepcopy(self._preupdate_resource.get("Options", dict())) + options.update(self._resource_updates.get("Options", dict())) options[name] = value - return self._set_resource_value('Options', options) + return self._set_resource_value("Options", options) def set_dns_support(self, value): - return self._set_option('DnsSupport', value) + return self._set_option("DnsSupport", value) def set_ipv6_support(self, value): - return self._set_option('Ipv6Support', value) + return self._set_option("Ipv6Support", value) def set_appliance_mode_support(self, value): - return self._set_option('ApplianceModeSupport', value) + return self._set_option("ApplianceModeSupport", value) def set_transit_gateway(self, tgw_id): - return self._set_resource_value('TransitGatewayId', tgw_id) + return self._set_resource_value("TransitGatewayId", tgw_id) def set_vpc(self, vpc_id): - return self._set_resource_value('VpcId', vpc_id) + return self._set_resource_value("VpcId", vpc_id) def set_subnets(self, subnets=None, purge=True): if subnets is None: return False - current_subnets = set(self._preupdate_resource.get('SubnetIds', [])) + current_subnets = set(self._preupdate_resource.get("SubnetIds", [])) desired_subnets = set(subnets) if not purge: desired_subnets = desired_subnets.union(current_subnets) @@ -198,21 +217,23 @@ class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager # information we 'know'. subnet_details = self._describe_subnets(SubnetIds=list(desired_subnets)) vpc_id = self.subnets_to_vpc(desired_subnets, subnet_details) - self._set_resource_value('VpcId', vpc_id, immutable=True) + self._set_resource_value("VpcId", vpc_id, immutable=True) # Only one subnet per-AZ is permitted - azs = [s.get('AvailabilityZoneId') for s in subnet_details] + azs = [s.get("AvailabilityZoneId") for s in subnet_details] if len(azs) != len(set(azs)): self.module.fail_json( - msg='Only one attachment subnet per availability zone may be set.', - availability_zones=azs, subnets=subnet_details) + msg="Only one attachment subnet per availability zone may be set.", + availability_zones=azs, + subnets=subnet_details, + ) subnets_to_add = list(desired_subnets.difference(current_subnets)) subnets_to_remove = list(current_subnets.difference(desired_subnets)) if not subnets_to_remove and not subnets_to_add: return False self._subnet_updates = dict(add=subnets_to_add, remove=subnets_to_remove) - self._set_resource_value('SubnetIds', list(desired_subnets)) + self._set_resource_value("SubnetIds", list(desired_subnets)) return True def subnets_to_vpc(self, subnets, subnet_details=None): @@ -222,11 +243,13 @@ class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager if subnet_details is None: subnet_details = self._describe_subnets(SubnetIds=list(subnets)) - vpcs = [s.get('VpcId') for s in subnet_details] + vpcs = [s.get("VpcId") for s in subnet_details] if len(set(vpcs)) > 1: self.module.fail_json( - msg='Attachment subnets may only be in one VPC, multiple VPCs found', - vpcs=list(set(vpcs)), subnets=subnet_details) + msg="Attachment subnets may only be in one VPC, multiple VPCs found", + vpcs=list(set(vpcs)), + subnets=subnet_details, + ) return vpcs[0] @@ -249,26 +272,25 @@ class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager params = self._merge_resource_changes(filter_immutable=False, creation=True) response = self._create_vpc_attachment(**params) if response: - self.resource_id = response.get('TransitGatewayAttachmentId', None) + self.resource_id = 
response.get("TransitGatewayAttachmentId", None) return response def _do_update_resource(self): - if self._preupdate_resource.get('State', None) == 'pending': + if self._preupdate_resource.get("State", None) == "pending": # Resources generally don't like it if you try to update before creation # is complete. If things are in a 'pending' state they'll often throw # exceptions. self._wait_for_creation() - elif self._preupdate_resource.get('State', None) == 'deleting': - self.module.fail_json(msg='Deletion in progress, unable to update', - route_tables=[self.original_resource]) + elif self._preupdate_resource.get("State", None) == "deleting": + self.module.fail_json(msg="Deletion in progress, unable to update", route_tables=[self.original_resource]) updates = self._filter_immutable_resource_attributes(self._resource_updates) - subnets_to_add = self._subnet_updates.get('add', []) - subnets_to_remove = self._subnet_updates.get('remove', []) + subnets_to_add = self._subnet_updates.get("add", []) + subnets_to_remove = self._subnet_updates.get("remove", []) if subnets_to_add: - updates['AddSubnetIds'] = subnets_to_add + updates["AddSubnetIds"] = subnets_to_add if subnets_to_remove: - updates['RemoveSubnetIds'] = subnets_to_remove + updates["RemoveSubnetIds"] = subnets_to_remove if not updates: return False @@ -284,7 +306,6 @@ class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager return self.get_attachment() def delete(self, id=None): - if id: id_params = self._get_id_params(id=id, id_list=True) result = self._get_tgw_vpc_attachment(**id_params) @@ -296,7 +317,7 @@ class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager if not result: return False - if result.get('State') == 'deleting': + if result.get("State") == "deleting": self._wait_for_deletion() return False @@ -316,9 +337,9 @@ class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager def list(self, filters=None, id=None): params = dict() if id: - params['TransitGatewayAttachmentIds'] = [id] + params["TransitGatewayAttachmentIds"] = [id] if filters: - params['Filters'] = ansible_dict_to_boto3_filter_list(filters) + params["Filters"] = ansible_dict_to_boto3_filter_list(filters) attachments = self._describe_vpc_attachments(**params) if not attachments: return list() @@ -326,7 +347,6 @@ class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager return [self._normalize_tgw_attachment(a) for a in attachments] def get_attachment(self, id=None): - # RouteTable needs a list, Association/Propagation needs a single ID id_params = self._get_id_params(id=id, id_list=True) id_param = self._get_id_params(id=id, id_list=False) diff --git a/ansible_collections/community/aws/plugins/module_utils/wafv2.py b/ansible_collections/community/aws/plugins/module_utils/wafv2.py index 18f19974d..c0eb363ef 100644 --- a/ansible_collections/community/aws/plugins/module_utils/wafv2.py +++ b/ansible_collections/community/aws/plugins/module_utils/wafv2.py @@ -1,12 +1,15 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags @@ -16,7 +19,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_ def _list_tags(wafv2, arn, fail_json_aws, next_marker=None): params = dict(ResourceARN=arn) if next_marker: - params['NextMarker'] = next_marker + params["NextMarker"] = next_marker try: return wafv2.list_tags_for_resource(**params) except (BotoCoreError, ClientError) as e: @@ -29,9 +32,9 @@ def describe_wafv2_tags(wafv2, arn, fail_json_aws): # there is currently no paginator for wafv2 while True: responce = _list_tags(wafv2, arn, fail_json_aws) - next_marker = responce.get('NextMarker', None) - tag_info = responce.get('TagInfoForResource', {}) - tag_list.extend(tag_info.get('TagList', [])) + next_marker = responce.get("NextMarker", None) + tag_info = responce.get("TagInfoForResource", {}) + tag_list.extend(tag_info.get("TagList", [])) if not next_marker: break return boto3_tag_list_to_ansible_dict(tag_list) @@ -66,39 +69,37 @@ def ensure_wafv2_tags(wafv2, arn, tags, purge_tags, fail_json_aws, check_mode): def wafv2_list_web_acls(wafv2, scope, fail_json_aws, nextmarker=None): # there is currently no paginator for wafv2 - req_obj = { - 'Scope': scope, - 'Limit': 100 - } + req_obj = {"Scope": scope, "Limit": 100} if nextmarker: - req_obj['NextMarker'] = nextmarker + req_obj["NextMarker"] = nextmarker try: response = wafv2.list_web_acls(**req_obj) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to list wafv2 web acl") - if response.get('NextMarker'): - response['WebACLs'] += wafv2_list_web_acls(wafv2, scope, fail_json_aws, nextmarker=response.get('NextMarker')).get('WebACLs') + if response.get("NextMarker"): + response["WebACLs"] += wafv2_list_web_acls( + wafv2, scope, fail_json_aws, nextmarker=response.get("NextMarker") + ).get("WebACLs") return response def wafv2_list_rule_groups(wafv2, scope, fail_json_aws, nextmarker=None): # there is currently no paginator for wafv2 - req_obj = { - 'Scope': scope, - 'Limit': 100 - } + req_obj = {"Scope": scope, "Limit": 100} if nextmarker: - req_obj['NextMarker'] = nextmarker + req_obj["NextMarker"] = nextmarker try: response = wafv2.list_rule_groups(**req_obj) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to list wafv2 rule group") - if response.get('NextMarker'): - response['RuleGroups'] += wafv2_list_rule_groups(wafv2, scope, fail_json_aws, nextmarker=response.get('NextMarker')).get('RuleGroups') + if response.get("NextMarker"): + response["RuleGroups"] += wafv2_list_rule_groups( + wafv2, scope, fail_json_aws, nextmarker=response.get("NextMarker") + ).get("RuleGroups") return response @@ -109,20 +110,20 @@ def wafv2_snake_dict_to_camel_dict(a): retval = {} for item in a.keys(): if isinstance(a.get(item), dict): - if 'Ip' in item: - retval[item.replace('Ip', 'IP')] = wafv2_snake_dict_to_camel_dict(a.get(item)) - elif 'Arn' == item: - retval['ARN'] = wafv2_snake_dict_to_camel_dict(a.get(item)) + if "Ip" in item: + retval[item.replace("Ip", "IP")] = wafv2_snake_dict_to_camel_dict(a.get(item)) + elif "Arn" == item: + retval["ARN"] = 
wafv2_snake_dict_to_camel_dict(a.get(item)) else: retval[item] = wafv2_snake_dict_to_camel_dict(a.get(item)) elif isinstance(a.get(item), list): retval[item] = [] for idx in range(len(a.get(item))): retval[item].append(wafv2_snake_dict_to_camel_dict(a.get(item)[idx])) - elif 'Ip' in item: - retval[item.replace('Ip', 'IP')] = a.get(item) - elif 'Arn' == item: - retval['ARN'] = a.get(item) + elif "Ip" in item: + retval[item.replace("Ip", "IP")] = a.get(item) + elif "Arn" == item: + retval["ARN"] = a.get(item) else: retval[item] = a.get(item) return retval @@ -135,24 +136,31 @@ def nested_byte_values_to_strings(rule, keyname): - AndStatement - NotStatement """ - if rule.get('Statement', {}).get(keyname): - for idx in range(len(rule.get('Statement', {}).get(keyname, {}).get('Statements'))): - if rule['Statement'][keyname]['Statements'][idx].get('ByteMatchStatement'): - rule['Statement'][keyname]['Statements'][idx]['ByteMatchStatement']['SearchString'] = \ - rule.get('Statement').get(keyname).get('Statements')[idx].get('ByteMatchStatement').get('SearchString').decode('utf-8') + if rule.get("Statement", {}).get(keyname): + for idx in range(len(rule.get("Statement", {}).get(keyname, {}).get("Statements"))): + if rule["Statement"][keyname]["Statements"][idx].get("ByteMatchStatement"): + rule["Statement"][keyname]["Statements"][idx]["ByteMatchStatement"]["SearchString"] = ( + rule.get("Statement") + .get(keyname) + .get("Statements")[idx] + .get("ByteMatchStatement") + .get("SearchString") + .decode("utf-8") + ) return rule def byte_values_to_strings_before_compare(rules): for idx in range(len(rules)): - if rules[idx].get('Statement', {}).get('ByteMatchStatement', {}).get('SearchString'): - rules[idx]['Statement']['ByteMatchStatement']['SearchString'] = \ - rules[idx].get('Statement').get('ByteMatchStatement').get('SearchString').decode('utf-8') + if rules[idx].get("Statement", {}).get("ByteMatchStatement", {}).get("SearchString"): + rules[idx]["Statement"]["ByteMatchStatement"]["SearchString"] = ( + rules[idx].get("Statement").get("ByteMatchStatement").get("SearchString").decode("utf-8") + ) else: - for statement in ['AndStatement', 'OrStatement', 'NotStatement']: - if rules[idx].get('Statement', {}).get(statement): + for statement in ["AndStatement", "OrStatement", "NotStatement"]: + if rules[idx].get("Statement", {}).get(statement): rules[idx] = nested_byte_values_to_strings(rules[idx], statement) return rules @@ -160,11 +168,11 @@ def byte_values_to_strings_before_compare(rules): def compare_priority_rules(existing_rules, requested_rules, purge_rules, state): diff = False - existing_rules = sorted(existing_rules, key=lambda k: k['Priority']) + existing_rules = sorted(existing_rules, key=lambda k: k["Priority"]) existing_rules = byte_values_to_strings_before_compare(existing_rules) - requested_rules = sorted(requested_rules, key=lambda k: k['Priority']) + requested_rules = sorted(requested_rules, key=lambda k: k["Priority"]) - if purge_rules and state == 'present': + if purge_rules and state == "present": merged_rules = requested_rules if len(existing_rules) == len(requested_rules): for idx in range(len(existing_rules)): @@ -182,8 +190,8 @@ def compare_priority_rules(existing_rules, requested_rules, purge_rules, state): ex_idx_pop = [] for existing_idx in range(len(existing_rules)): for requested_idx in range(len(requested_rules)): - if existing_rules[existing_idx].get('Priority') == requested_rules[requested_idx].get('Priority'): - if state == 'present': + if 
existing_rules[existing_idx].get("Priority") == requested_rules[requested_idx].get("Priority"): + if state == "present": ex_idx_pop.append(existing_idx) if existing_rules[existing_idx] != requested_rules[requested_idx]: diff = True @@ -195,7 +203,7 @@ def compare_priority_rules(existing_rules, requested_rules, purge_rules, state): for idx in ex_idx_pop: existing_rules.pop(idx) - if state == 'present': + if state == "present": merged_rules = existing_rules + requested_rules if len(merged_rules) != prev_count: diff --git a/ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py b/ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py index e589d0cb0..fab777175 100644 --- a/ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py +++ b/ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: accessanalyzer_validate_policy_info version_added: 5.0.0 @@ -63,19 +61,19 @@ options: author: - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Validate a policy - name: Validate a simple IAM policy community.aws.accessanalyzer_validate_policy_info: policy: "{{ lookup('template', 'managed_policy.json.j2') }}" -''' +""" -RETURN = r''' +RETURN = r""" findings: description: The list of findings in a policy returned by IAM Access Analyzer based on its suite of policy checks. returned: success @@ -160,7 +158,7 @@ findings: description: The offset within the policy that corresponds to the position, starting from C(0). type: int returned: success -''' +""" try: import botocore @@ -169,8 +167,9 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def filter_findings(findings, type_filter): @@ -178,11 +177,10 @@ def filter_findings(findings, type_filter): return findings # Convert type_filter to the findingType strings returned by the API - filter_map = dict(error='ERROR', security='SECURITY_WARNING', - suggestion='SUGGESTION', warning='WARNING') + filter_map = dict(error="ERROR", security="SECURITY_WARNING", suggestion="SUGGESTION", warning="WARNING") allowed_types = [filter_map[t] for t in type_filter] - filtered_results = [f for f in findings if f.get('findingType', None) in allowed_types] + filtered_results = [f for f in findings if f.get("findingType", None) in allowed_types] return filtered_results @@ -191,47 +189,47 @@ def main(): # values are likely to be expanded, let's avoid hard coding limits which might not hold true in # the long term... 
argument_spec = dict( - policy=dict(required=True, type='json', aliases=['policy_document']), - locale=dict(required=False, type='str', default='EN'), - policy_type=dict(required=False, type='str', default='identity', - choices=['identity', 'resource', 'service_control']), - resource_type=dict(required=False, type='str'), - results_filter=dict(required=False, type='list', elements='str', - choices=['error', 'security', 'suggestion', 'warning']), + policy=dict(required=True, type="json", aliases=["policy_document"]), + locale=dict(required=False, type="str", default="EN"), + policy_type=dict( + required=False, type="str", default="identity", choices=["identity", "resource", "service_control"] + ), + resource_type=dict(required=False, type="str"), + results_filter=dict( + required=False, type="list", elements="str", choices=["error", "security", "suggestion", "warning"] + ), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - policy_type_map = dict(identity='IDENTITY_POLICY', resource='RESOURCE_POLICY', - service_control='SERVICE_CONTROL_POLICY') + policy_type_map = dict( + identity="IDENTITY_POLICY", resource="RESOURCE_POLICY", service_control="SERVICE_CONTROL_POLICY" + ) - policy = module.params.get('policy') - policy_type = policy_type_map[module.params.get('policy_type')] - locale = module.params.get('locale').upper() - resource_type = module.params.get('resource_type') - results_filter = module.params.get('results_filter') + policy = module.params.get("policy") + policy_type = policy_type_map[module.params.get("policy_type")] + locale = module.params.get("locale").upper() + resource_type = module.params.get("resource_type") + results_filter = module.params.get("results_filter") try: - client = module.client('accessanalyzer', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("accessanalyzer", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") params = dict(locale=locale, policyDocument=policy, policyType=policy_type) - if policy_type == 'RESOURCE_POLICY' and resource_type: - params['policyType'] = resource_type + if policy_type == "RESOURCE_POLICY" and resource_type: + params["policyType"] = resource_type results = client.validate_policy(aws_retry=True, **params) - findings = filter_findings(results.get('findings', []), results_filter) - results['findings'] = findings + findings = filter_findings(results.get("findings", []), results_filter) + results["findings"] = findings results = camel_dict_to_snake_dict(results) module.exit_json(changed=False, **results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/acm_certificate.py b/ansible_collections/community/aws/plugins/modules/acm_certificate.py index abdecadcc..0b4f7037a 100644 --- a/ansible_collections/community/aws/plugins/modules/acm_certificate.py +++ b/ansible_collections/community/aws/plugins/modules/acm_certificate.py @@ -1,31 +1,14 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# + # Copyright (c) 2019 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General 
Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see <http://www.gnu.org/licenses/>. -# + # Author: # - Matthew Davis <Matthew.Davis.2@team.telstra.com> # on behalf of Telstra Corporation Limited -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: acm_certificate short_description: Upload and delete certificates in the AWS Certificate Manager service @@ -175,23 +158,23 @@ notes: author: - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: upload a self-signed certificate - community.aws.aws_acm: + community.aws.acm_certificate: certificate: "{{ lookup('file', 'cert.pem' ) }}" privateKey: "{{ lookup('file', 'key.pem' ) }}" name_tag: my_cert # to be applied through an AWS tag as "Name":"my_cert" region: ap-southeast-2 # AWS region - name: create/update a certificate with a chain - community.aws.aws_acm: + community.aws.acm_certificate: certificate: "{{ lookup('file', 'cert.pem' ) }}" private_key: "{{ lookup('file', 'key.pem' ) }}" name_tag: my_cert @@ -205,34 +188,34 @@ EXAMPLES = ''' var: cert_create.certificate.arn - name: delete the cert we just created - community.aws.aws_acm: + community.aws.acm_certificate: name_tag: my_cert state: absent region: ap-southeast-2 - name: delete a certificate with a particular ARN - community.aws.aws_acm: + community.aws.acm_certificate: certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" state: absent region: ap-southeast-2 - name: delete all certificates with a particular domain name - community.aws.aws_acm: + community.aws.acm_certificate: domain_name: acm.ansible.com state: absent region: ap-southeast-2 - name: add tags to an existing certificate with a particular ARN - community.aws.aws_acm: + community.aws.acm_certificate: certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" tags: Name: my_certificate Application: search Environment: development purge_tags: true -''' +""" -RETURN = ''' +RETURN = r""" certificate: description: Information about the certificate which was uploaded type: complex @@ -255,27 +238,27 @@ arns: returned: when I(state=absent) sample: - "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" -''' +""" import base64 -from copy import deepcopy import re # regex library +from copy import deepcopy try: import botocore except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( 
- boto3_tag_list_to_ansible_dict, - ansible_dict_to_boto3_tag_list, -) from ansible.module_utils._text import to_text +from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): if tags is None: @@ -293,12 +276,10 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws( - e, "Couldn't add tags to certificate {0}".format(resource_arn) - ) + module.fail_json_aws(e, f"Couldn't add tags to certificate {resource_arn}") if tags_to_remove and not module.check_mode: # remove_tags_from_certificate wants a list of key, value pairs, not a list of keys. - tags_list = [{'Key': key, 'Value': existing_tags.get(key)} for key in tags_to_remove] + tags_list = [{"Key": key, "Value": existing_tags.get(key)} for key in tags_to_remove] try: client.remove_tags_from_certificate( CertificateArn=resource_arn, @@ -308,9 +289,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws( - e, "Couldn't remove tags from certificate {0}".format(resource_arn) - ) + module.fail_json_aws(e, f"Couldn't remove tags from certificate {resource_arn}") new_tags = deepcopy(existing_tags) for key, value in tags_to_add.items(): new_tags[key] = value @@ -325,7 +304,6 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): # May include some lines between each chain in the cert, e.g. "Subject: ..." # Returns True iff the chains/certs are functionally identical (including chain order) def chain_compare(module, a, b): - chain_a_pem = pem_chain_split(module, a) chain_b_pem = pem_chain_split(module, b) @@ -333,7 +311,7 @@ def chain_compare(module, a, b): return False # Chain length is the same - for (ca, cb) in zip(chain_a_pem, chain_b_pem): + for ca, cb in zip(chain_a_pem, chain_b_pem): der_a = PEM_body_to_DER(module, ca) der_b = PEM_body_to_DER(module, cb) if der_a != der_b: @@ -353,7 +331,9 @@ def PEM_body_to_DER(module, pem): # Store this globally to avoid repeated recompilation -pem_chain_split_regex = re.compile(r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?") +pem_chain_split_regex = re.compile( + r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?" +) # Use regex to split up a chain or single cert into an array of base64 encoded data @@ -361,7 +341,6 @@ pem_chain_split_regex = re.compile(r"------?BEGIN [A-Z0-9. 
]*CERTIFICATE------?( # Noting that some chains have non-pem data in between each cert # This function returns only what's between the headers, excluding the headers def pem_chain_split(module, pem): - pem_arr = re.findall(pem_chain_split_regex, to_text(pem)) if len(pem_arr) == 0: @@ -376,53 +355,55 @@ def update_imported_certificate(client, module, acm, old_cert, desired_tags): Update the existing certificate that was previously imported in ACM. """ module.debug("Existing certificate found in ACM") - if ('tags' not in old_cert) or ('Name' not in old_cert['tags']): + if ("tags" not in old_cert) or ("Name" not in old_cert["tags"]): # shouldn't happen module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert) - if module.params.get('name_tag') is not None and (old_cert['tags']['Name'] != module.params.get('name_tag')): + if module.params.get("name_tag") is not None and (old_cert["tags"]["Name"] != module.params.get("name_tag")): # This could happen if the user identified the certificate using 'certificate_arn' or 'domain_name', # and the 'Name' tag in the AWS API does not match the ansible 'name_tag'. module.fail_json(msg="Internal error, Name tag does not match", certificate=old_cert) - if 'certificate' not in old_cert: + if "certificate" not in old_cert: # shouldn't happen module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert) cert_arn = None # Are the existing certificate in ACM and the local certificate the same? same = True - if module.params.get('certificate') is not None: - same &= chain_compare(module, old_cert['certificate'], module.params['certificate']) - if module.params['certificate_chain']: + if module.params.get("certificate") is not None: + same &= chain_compare(module, old_cert["certificate"], module.params["certificate"]) + if module.params["certificate_chain"]: # Need to test this # not sure if Amazon appends the cert itself to the chain when self-signed - same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain']) + same &= chain_compare(module, old_cert["certificate_chain"], module.params["certificate_chain"]) else: # When there is no chain with a cert # it seems Amazon returns the cert itself as the chain - same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate']) + same &= chain_compare(module, old_cert["certificate_chain"], module.params["certificate"]) if same: module.debug("Existing certificate in ACM is the same") - cert_arn = old_cert['certificate_arn'] + cert_arn = old_cert["certificate_arn"] changed = False else: - absent_args = ['certificate', 'name_tag', 'private_key'] + absent_args = ["certificate", "name_tag", "private_key"] if sum([(module.params[a] is not None) for a in absent_args]) < 3: - module.fail_json(msg="When importing a certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified") + module.fail_json( + msg="When importing a certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified" + ) module.debug("Existing certificate in ACM is different, overwriting") changed = True if module.check_mode: - cert_arn = old_cert['certificate_arn'] + cert_arn = old_cert["certificate_arn"] # note: returned domain will be the domain of the previous cert else: # update cert in ACM cert_arn = acm.import_certificate( client, module, - certificate=module.params['certificate'], - private_key=module.params['private_key'], - 
certificate_chain=module.params['certificate_chain'], - arn=old_cert['certificate_arn'], + certificate=module.params["certificate"], + private_key=module.params["private_key"], + certificate_chain=module.params["certificate_chain"], + arn=old_cert["certificate_arn"], tags=desired_tags, ) return (changed, cert_arn) @@ -433,22 +414,24 @@ def import_certificate(client, module, acm, desired_tags): Import a certificate to ACM. """ # Validate argument requirements - absent_args = ['certificate', 'name_tag', 'private_key'] + absent_args = ["certificate", "name_tag", "private_key"] cert_arn = None if sum([(module.params[a] is not None) for a in absent_args]) < 3: - module.fail_json(msg="When importing a new certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified") + module.fail_json( + msg="When importing a new certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified" + ) module.debug("No certificate in ACM. Creating new one.") changed = True if module.check_mode: - domain = 'example.com' + domain = "example.com" module.exit_json(certificate=dict(domain_name=domain), changed=True) else: cert_arn = acm.import_certificate( client, module, - certificate=module.params['certificate'], - private_key=module.params['private_key'], - certificate_chain=module.params['certificate_chain'], + certificate=module.params["certificate"], + private_key=module.params["private_key"], + certificate_chain=module.params["certificate_chain"], tags=desired_tags, ) return (changed, cert_arn) @@ -458,7 +441,7 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags, cert_arn = None changed = False if len(certificates) > 1: - msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag'] + msg = f"More than one certificate with Name={module.params['name_tag']} exists in ACM in this region" module.fail_json(msg=msg, certificates=certificates) elif len(certificates) == 1: # Update existing certificate that was previously imported to ACM. 
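Aside: the chain comparison being refactored here boils down to splitting each PEM bundle into per-certificate base64 bodies, decoding them to DER, and comparing bytes in order. A minimal standalone sketch of that idea, using only the standard library and a hypothetical helper name (chains_equal), not the module's own API:

    import base64
    import re

    # Same pattern shape as pem_chain_split_regex above: capture the base64
    # body between BEGIN/END CERTIFICATE markers (the regex tolerates five or
    # six hyphens, and optional qualifiers like "TRUSTED CERTIFICATE").
    _PEM_RE = re.compile(
        r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?"
        r"([a-zA-Z0-9\+\/=\s]+)"
        r"------?END [A-Z0-9. ]*CERTIFICATE------?"
    )

    def chains_equal(pem_a, pem_b):
        """Return True iff two PEM chains hold identical certs in the same order."""
        bodies_a = _PEM_RE.findall(pem_a)
        bodies_b = _PEM_RE.findall(pem_b)
        if len(bodies_a) != len(bodies_b):
            return False
        for body_a, body_b in zip(bodies_a, bodies_b):
            # Decode to DER so whitespace and line-wrapping differences are ignored.
            der_a = base64.b64decode("".join(body_a.split()))
            der_b = base64.b64decode("".join(body_b.split()))
            if der_a != der_b:
                return False
        return True

As in chain_compare above, order matters: the same certificates in a different order compare unequal.
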
@@ -469,11 +452,13 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags, # Add/remove tags to/from certificate try: - existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags_for_certificate(CertificateArn=cert_arn)['Tags']) + existing_tags = boto3_tag_list_to_ansible_dict( + client.list_tags_for_certificate(CertificateArn=cert_arn)["Tags"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't get tags for certificate") - purge_tags = module.params.get('purge_tags') + purge_tags = module.params.get("purge_tags") (c, new_tags) = ensure_tags(client, module, cert_arn, existing_tags, desired_tags, purge_tags) changed |= c domain = acm.get_domain_of_cert(client=client, module=module, arn=cert_arn) @@ -483,21 +468,21 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags, def ensure_certificates_absent(client, module, acm, certificates): for cert in certificates: if not module.check_mode: - acm.delete_certificate(client, module, cert['certificate_arn']) - module.exit_json(arns=[cert['certificate_arn'] for cert in certificates], changed=(len(certificates) > 0)) + acm.delete_certificate(client, module, cert["certificate_arn"]) + module.exit_json(arns=[cert["certificate_arn"] for cert in certificates], changed=(len(certificates) > 0)) def main(): argument_spec = dict( certificate=dict(), - certificate_arn=dict(aliases=['arn']), + certificate_arn=dict(aliases=["arn"]), certificate_chain=dict(), - domain_name=dict(aliases=['domain']), - name_tag=dict(aliases=['name']), + domain_name=dict(aliases=["domain"]), + name_tag=dict(aliases=["name"]), private_key=dict(no_log=True), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, @@ -506,62 +491,66 @@ def main(): acm = ACMServiceManager(module) # Check argument requirements - if module.params['state'] == 'present': + if module.params["state"] == "present": # at least one of these should be specified. 
- absent_args = ['certificate_arn', 'domain_name', 'name_tag'] + absent_args = ["certificate_arn", "domain_name", "name_tag"] if sum([(module.params[a] is not None) for a in absent_args]) < 1: for a in absent_args: - module.debug("%s is %s" % (a, module.params[a])) - module.fail_json(msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified") + module.debug(f"{a} is {module.params[a]}") + module.fail_json( + msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" + ) else: # absent # exactly one of these should be specified - absent_args = ['certificate_arn', 'domain_name', 'name_tag'] + absent_args = ["certificate_arn", "domain_name", "name_tag"] if sum([(module.params[a] is not None) for a in absent_args]) != 1: for a in absent_args: - module.debug("%s is %s" % (a, module.params[a])) - module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified") + module.debug(f"{a} is {module.params[a]}") + module.fail_json( + msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" + ) filter_tags = None desired_tags = None - if module.params.get('tags') is not None: - desired_tags = module.params['tags'] + if module.params.get("tags") is not None: + desired_tags = module.params["tags"] else: # Because we're setting the Name tag, we need to explicitly not purge when tags isn't passed - module.params['purge_tags'] = False - if module.params.get('name_tag') is not None: + module.params["purge_tags"] = False + if module.params.get("name_tag") is not None: # The module was originally implemented to filter certificates based on the 'Name' tag. # Other tags are not used to filter certificates. # It would make sense to replace the existing name_tag, domain, certificate_arn attributes # with a 'filter' attribute, but that would break backwards-compatibility. 
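Both argument checks above boil down to counting how many of the identifying parameters were supplied. As a standalone sketch (count_specified is a hypothetical helper, not part of the module):

def count_specified(params, names):
    # How many of the named module parameters were actually supplied.
    return sum(params.get(name) is not None for name in names)

# state=present demands at least one identifier:
#   count_specified(module.params, ["certificate_arn", "domain_name", "name_tag"]) >= 1
# state=absent demands exactly one:
#   count_specified(module.params, ["certificate_arn", "domain_name", "name_tag"]) == 1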
- filter_tags = dict(Name=module.params['name_tag']) + filter_tags = dict(Name=module.params["name_tag"]) if desired_tags is not None: - if 'Name' in desired_tags: - if desired_tags['Name'] != module.params['name_tag']: + if "Name" in desired_tags: + if desired_tags["Name"] != module.params["name_tag"]: module.fail_json(msg="Value of 'name_tag' conflicts with value of 'tags.Name'") else: - desired_tags['Name'] = module.params['name_tag'] + desired_tags["Name"] = module.params["name_tag"] else: desired_tags = deepcopy(filter_tags) - client = module.client('acm') + client = module.client("acm") # fetch the list of certificates currently in ACM certificates = acm.get_certificates( client=client, module=module, - domain_name=module.params['domain_name'], - arn=module.params['certificate_arn'], + domain_name=module.params["domain_name"], + arn=module.params["certificate_arn"], only_tags=filter_tags, ) - module.debug("Found %d corresponding certificates in ACM" % len(certificates)) - if module.params['state'] == 'present': + module.debug(f"Found {len(certificates)} corresponding certificates in ACM") + if module.params["state"] == "present": ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags) else: # state == absent ensure_certificates_absent(client, module, acm, certificates) -if __name__ == '__main__': +if __name__ == "__main__": # tests() main() diff --git a/ansible_collections/community/aws/plugins/modules/acm_certificate_info.py b/ansible_collections/community/aws/plugins/modules/acm_certificate_info.py index a84d7c0b0..73da208f1 100644 --- a/ansible_collections/community/aws/plugins/modules/acm_certificate_info.py +++ b/ansible_collections/community/aws/plugins/modules/acm_certificate_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: acm_certificate_info short_description: Retrieve certificate information from AWS Certificate Manager service version_added: 1.0.0 @@ -43,26 +41,26 @@ options: author: - Will Thames (@willthames) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: obtain all ACM certificates - community.aws.aws_acm_info: + community.aws.acm_certificate_info: - name: obtain all information for a single ACM certificate - community.aws.aws_acm_info: + community.aws.acm_certificate_info: domain_name: "*.example_com" - name: obtain all certificates pending validation - community.aws.aws_acm_info: + community.aws.acm_certificate_info: statuses: - - PENDING_VALIDATION + - PENDING_VALIDATION - name: obtain all certificates with tag Name=foo and myTag=bar - community.aws.aws_acm_info: + community.aws.acm_certificate_info: tags: Name: foo myTag: bar @@ -70,12 +68,11 @@ EXAMPLES = r''' # The output is still a list of certificates, just one item long. 
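The only_tags argument passed to acm.get_certificates (the filter_tags built above, and module.params["tags"] in the info module that follows) matches a certificate only when every requested tag is present with the same value. A sketch of that subset semantics, assumed from the call sites (the helper itself lives in the amazon.aws ACM module_utils):

def matches_tags(requested, actual):
    # True when every requested key/value pair is present on the resource;
    # extra tags on the resource do not prevent a match.
    return all(actual.get(key) == value for key, value in requested.items())

# matches_tags({"Name": "foo", "myTag": "bar"},
#              {"Name": "foo", "myTag": "bar", "env": "prod"})  -> True
# matches_tags({"Name": "foo"}, {"Name": "other"})              -> False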
- name: obtain information about a certificate with a particular ARN - community.aws.aws_acm_info: - certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12" + community.aws.acm_certificate_info: + certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12" +""" -''' - -RETURN = r''' +RETURN = r""" certificates: description: A list of certificates returned: always @@ -257,39 +254,51 @@ certificates: returned: always sample: AMAZON_ISSUED type: str -''' +""" -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def main(): argument_spec = dict( - certificate_arn=dict(aliases=['arn']), - domain_name=dict(aliases=['name']), + certificate_arn=dict(aliases=["arn"]), + domain_name=dict(aliases=["name"]), statuses=dict( - type='list', - elements='str', - choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED'] + type="list", + elements="str", + choices=[ + "PENDING_VALIDATION", + "ISSUED", + "INACTIVE", + "EXPIRED", + "VALIDATION_TIMED_OUT", + "REVOKED", + "FAILED", + ], ), - tags=dict(type='dict'), + tags=dict(type="dict"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) acm_info = ACMServiceManager(module) - client = module.client('acm') + client = module.client("acm") - certificates = acm_info.get_certificates(client, module, - domain_name=module.params['domain_name'], - statuses=module.params['statuses'], - arn=module.params['certificate_arn'], - only_tags=module.params['tags']) + certificates = acm_info.get_certificates( + client, + module, + domain_name=module.params["domain_name"], + statuses=module.params["statuses"], + arn=module.params["certificate_arn"], + only_tags=module.params["tags"], + ) - if module.params['certificate_arn'] and len(certificates) != 1: - module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params['certificate_arn']) + if module.params["certificate_arn"] and len(certificates) != 1: + module.fail_json(msg=f"No certificate exists in this region with ARN {module.params['certificate_arn']}") module.exit_json(certificates=certificates) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/api_gateway.py b/ansible_collections/community/aws/plugins/modules/api_gateway.py index a084bf93e..af4432387 100644 --- a/ansible_collections/community/aws/plugins/modules/api_gateway.py +++ b/ansible_collections/community/aws/plugins/modules/api_gateway.py @@ -4,11 +4,7 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: api_gateway version_added: 1.0.0 @@ -102,21 +98,34 @@ options: choices: ['EDGE', 'REGIONAL', 'PRIVATE'] type: str default: EDGE + name: + description: + - The name of the RestApi. + type: str + version_added: 6.2.0 + lookup: + description: + - Look up API gateway by either I(tags) (and I(name) if supplied) or by I(api_id). 
+ - If I(lookup=tag) and I(tags) is not specified then no lookup for an existing API gateway + is performed and a new API gateway will be created. + - When using I(lookup=tag), multiple matches being found will result in a failure and no changes will be made. + - To change the tags of an API gateway use I(lookup=id). + default: tag + choices: [ 'tag', 'id' ] + type: str + version_added: 6.2.0 author: - 'Michael De La Rue (@mikedlr)' +notes: + - 'Tags are used to uniquely identify an API gateway when the I(api_id) is not supplied. version_added=6.2.0' extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 + - amazon.aws.tags +""" -notes: - - A future version of this module will probably use tags or another - ID so that an API can be created only once. - - As an early work around an intermediate version will probably do - the same using a tag embedded in the API name. -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Setup AWS API Gateway and deploy API definition community.aws.api_gateway: swagger_file: my_api.yml @@ -143,11 +152,22 @@ EXAMPLES = ''' swagger_file: my_api.yml cache_enabled: true cache_size: '6.1' - canary_settings: { percentTraffic: 50.0, deploymentId: '123', useStageCache: True } + canary_settings: + percentTraffic: 50.0 + deploymentId: '123' + useStageCache: true state: present -''' -RETURN = ''' +- name: Delete API gateway + community.aws.api_gateway: + name: ansible-rest-api + tags: + automation: ansible + lookup: tag + state: absent +""" + +RETURN = r""" api_id: description: API id of the API endpoint created returned: success @@ -168,7 +188,7 @@ resource_actions: returned: always type: list sample: ["apigateway:CreateRestApi", "apigateway:CreateDeployment", "apigateway:PutRestApi"] -''' +""" import json import traceback @@ -180,70 +200,134 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def main(): argument_spec = dict( - api_id=dict(type='str', required=False), - state=dict(type='str', default='present', choices=['present', 'absent']), - swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']), - swagger_dict=dict(type='json', default=None), - swagger_text=dict(type='str', default=None), - stage=dict(type='str', default=None), - deploy_desc=dict(type='str', default="Automatic deployment by Ansible."), - cache_enabled=dict(type='bool', default=False), - cache_size=dict(type='str', default='0.5', choices=['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']), - stage_variables=dict(type='dict', default={}), - stage_canary_settings=dict(type='dict', default={}), - tracing_enabled=dict(type='bool', default=False), - endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE']) + api_id=dict(type="str", required=False), + state=dict(type="str", default="present", choices=["present", "absent"]), + 
swagger_file=dict(type="path", default=None, aliases=["src", "api_file"]), + swagger_dict=dict(type="json", default=None), + swagger_text=dict(type="str", default=None), + stage=dict(type="str", default=None), + deploy_desc=dict(type="str", default="Automatic deployment by Ansible."), + cache_enabled=dict(type="bool", default=False), + cache_size=dict(type="str", default="0.5", choices=["0.5", "1.6", "6.1", "13.5", "28.4", "58.2", "118", "237"]), + stage_variables=dict(type="dict", default={}), + stage_canary_settings=dict(type="dict", default={}), + tracing_enabled=dict(type="bool", default=False), + endpoint_type=dict(type="str", default="EDGE", choices=["EDGE", "REGIONAL", "PRIVATE"]), + name=dict(type="str"), + lookup=dict(type="str", choices=["tag", "id"], default="tag"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) - mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']] # noqa: F841 + mutually_exclusive = [["swagger_file", "swagger_dict", "swagger_text"]] # noqa: F841 module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=False, + supports_check_mode=True, mutually_exclusive=mutually_exclusive, ) - api_id = module.params.get('api_id') - state = module.params.get('state') # noqa: F841 - swagger_file = module.params.get('swagger_file') - swagger_dict = module.params.get('swagger_dict') - swagger_text = module.params.get('swagger_text') - endpoint_type = module.params.get('endpoint_type') + api_id = module.params.get("api_id") + state = module.params.get("state") # noqa: F841 + swagger_file = module.params.get("swagger_file") + swagger_dict = module.params.get("swagger_dict") + swagger_text = module.params.get("swagger_text") + endpoint_type = module.params.get("endpoint_type") + name = module.params.get("name") + tags = module.params.get("tags") + lookup = module.params.get("lookup") - client = module.client('apigateway') + client = module.client("apigateway") - changed = True # for now it will stay that way until we can sometimes avoid change + changed = True # for now it will stay that way until we can sometimes avoid change conf_res = None dep_res = None del_res = None if state == "present": if api_id is None: - api_id = create_empty_api(module, client, endpoint_type) - api_data = get_api_definitions(module, swagger_file=swagger_file, - swagger_dict=swagger_dict, swagger_text=swagger_text) - conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data) + # lookup API gateway using tags + if tags and lookup == "tag": + rest_api = get_api_by_tags(client, module, name, tags) + if rest_api: + api_id = rest_api["id"] + if module.check_mode: + module.exit_json(changed=True, msg="Create/update operation skipped - running in check mode.") + if api_id is None: + api_data = get_api_definitions( + module, swagger_file=swagger_file, swagger_dict=swagger_dict, swagger_text=swagger_text + ) + # create new API gateway as none was provided and/or found using lookup=tag + api_id = create_empty_api(module, client, name, endpoint_type, tags) + conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data) + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + if tags: + if not conf_res: + conf_res = get_rest_api(module, client, api_id=api_id) + tag_changed, tag_result = ensure_apigateway_tags( + module, client, api_id=api_id, current_tags=conf_res.get("tags"), new_tags=tags, purge_tags=purge_tags + ) + if tag_changed: + changed |= 
tag_changed + conf_res = tag_result if state == "absent": + if api_id is None: + if lookup != "tag" or not tags: + module.fail_json( + msg="API gateway id must be supplied to delete an API gateway, or tags provided with lookup=tag to identify the API gateway id." + ) + rest_api = get_api_by_tags(client, module, name, tags) + if not rest_api: + module.exit_json(changed=False, msg="No API gateway identified with tags provided") + api_id = rest_api["id"] + elif not describe_api(client, module, api_id): + module.exit_json(changed=False, msg=f"API gateway id '{api_id}' does not exist.") + + if module.check_mode: + module.exit_json(changed=True, msg="Delete operation skipped - running in check mode.", api_id=api_id) + del_res = delete_rest_api(module, client, api_id) exit_args = {"changed": changed, "api_id": api_id} if conf_res is not None: - exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res) + exit_args["configure_response"] = camel_dict_to_snake_dict(conf_res) if dep_res is not None: - exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res) + exit_args["deploy_response"] = camel_dict_to_snake_dict(dep_res) if del_res is not None: - exit_args['delete_response'] = camel_dict_to_snake_dict(del_res) + exit_args["delete_response"] = camel_dict_to_snake_dict(del_res) module.exit_json(**exit_args) +def ensure_apigateway_tags(module, client, api_id, current_tags, new_tags, purge_tags): + changed = False + tag_result = {} + tags_to_set, tags_to_delete = compare_aws_tags(current_tags, new_tags, purge_tags) + if tags_to_set or tags_to_delete: + changed = True + apigateway_arn = f"arn:aws:apigateway:{module.region}::/restapis/{api_id}" + # Remove tags from Resource + if tags_to_delete: + client.untag_resource(resourceArn=apigateway_arn, tagKeys=tags_to_delete) + # add new tags to resource + if tags_to_set: + client.tag_resource(resourceArn=apigateway_arn, tags=tags_to_set) + # Describe API gateway + tag_result = get_rest_api(module, client, api_id=api_id) + return changed, tag_result + + def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_text=None): apidata = None if swagger_file is not None: @@ -251,7 +335,7 @@ def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_te with open(swagger_file) as f: apidata = f.read() except OSError as e: - msg = "Failed trying to read swagger file {0}: {1}".format(str(swagger_file), str(e)) + msg = f"Failed trying to read swagger file {str(swagger_file)}: {str(e)}" module.fail_json(msg=msg, exception=traceback.format_exc()) if swagger_dict is not None: apidata = json.dumps(swagger_dict) @@ -259,11 +343,20 @@ def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_te apidata = swagger_text if apidata is None: - module.fail_json(msg='module error - no swagger info provided') + module.fail_json(msg="module error - no swagger info provided") return apidata -def create_empty_api(module, client, endpoint_type): +def get_rest_api(module, client, api_id): + try: + response = client.get_rest_api(restApiId=api_id) + response.pop("ResponseMetadata", None) + return response + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg=f"failed to get REST API with api_id={api_id}") + + +def create_empty_api(module, client, name, endpoint_type, tags): """ creates a new empty API ready to be configured. 
The description is temporarily set to show the API as incomplete but should be @@ -271,7 +364,8 @@ def create_empty_api(module, client, endpoint_type): """ desc = "Incomplete API creation by ansible api_gateway module" try: - awsret = create_api(client, name="ansible-temp-api", description=desc, endpoint_type=endpoint_type) + rest_api_name = name or "ansible-temp-api" + awsret = create_api(client, name=rest_api_name, description=desc, endpoint_type=endpoint_type, tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: module.fail_json_aws(e, msg="creating API") return awsret["id"] @@ -284,7 +378,7 @@ def delete_rest_api(module, client, api_id): try: delete_response = delete_api(client, api_id) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - module.fail_json_aws(e, msg="deleting API {0}".format(api_id)) + module.fail_json_aws(e, msg=f"deleting API {api_id}") return delete_response @@ -301,28 +395,56 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): configure_response = None try: configure_response = configure_api(client, api_id, api_data=api_data) + configure_response.pop("ResponseMetadata", None) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - module.fail_json_aws(e, msg="configuring API {0}".format(api_id)) + module.fail_json_aws(e, msg=f"configuring API {api_id}") deploy_response = None - stage = module.params.get('stage') + stage = module.params.get("stage") if stage: try: deploy_response = create_deployment(client, api_id, **module.params) + deploy_response.pop("ResponseMetadata", None) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - msg = "deploying api {0} to stage {1}".format(api_id, stage) + msg = f"deploying api {api_id} to stage {stage}" module.fail_json_aws(e, msg) return configure_response, deploy_response -retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ['TooManyRequestsException']} +def get_api_by_tags(client, module, name, tags): + count = 0 + result = None + for api in list_apis(client): + if name and api["name"] != name: + continue + api_tags = api.get("tags", {}) + if all((tag_key in api_tags and api_tags[tag_key] == tag_value for tag_key, tag_value in tags.items())): + result = api + count += 1 + + if count > 1: + args = "Tags" + if name: + args += " and name" + module.fail_json(msg=f"{args} provided do not identify a unique API gateway") + return result + + +retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ["TooManyRequestsException"]} @AWSRetry.jittered_backoff(**retry_params) -def create_api(client, name=None, description=None, endpoint_type=None): - return client.create_rest_api(name="ansible-temp-api", description=description, endpointConfiguration={'types': [endpoint_type]}) +def create_api(client, name, description=None, endpoint_type=None, tags=None): + params = {"name": name} + if description: + params["description"] = description + if endpoint_type: + params["endpointConfiguration"] = {"types": [endpoint_type]} + if tags: + params["tags"] = tags + return client.create_rest_api(**params) @AWSRetry.jittered_backoff(**retry_params) @@ -337,32 +459,53 @@ def configure_api(client, api_id, api_data=None, mode="overwrite"): @AWSRetry.jittered_backoff(**retry_params) def create_deployment(client, rest_api_id, **params): - canary_settings = params.get('stage_canary_settings') + canary_settings = 
params.get("stage_canary_settings") if canary_settings and len(canary_settings) > 0: result = client.create_deployment( restApiId=rest_api_id, - stageName=params.get('stage'), - description=params.get('deploy_desc'), - cacheClusterEnabled=params.get('cache_enabled'), - cacheClusterSize=params.get('cache_size'), - variables=params.get('stage_variables'), + stageName=params.get("stage"), + description=params.get("deploy_desc"), + cacheClusterEnabled=params.get("cache_enabled"), + cacheClusterSize=params.get("cache_size"), + variables=params.get("stage_variables"), canarySettings=canary_settings, - tracingEnabled=params.get('tracing_enabled') + tracingEnabled=params.get("tracing_enabled"), ) else: result = client.create_deployment( restApiId=rest_api_id, - stageName=params.get('stage'), - description=params.get('deploy_desc'), - cacheClusterEnabled=params.get('cache_enabled'), - cacheClusterSize=params.get('cache_size'), - variables=params.get('stage_variables'), - tracingEnabled=params.get('tracing_enabled') + stageName=params.get("stage"), + description=params.get("deploy_desc"), + cacheClusterEnabled=params.get("cache_enabled"), + cacheClusterSize=params.get("cache_size"), + variables=params.get("stage_variables"), + tracingEnabled=params.get("tracing_enabled"), ) return result -if __name__ == '__main__': +@AWSRetry.jittered_backoff(**retry_params) +def list_apis(client): + paginator = client.get_paginator("get_rest_apis") + return paginator.paginate().build_full_result().get("items", []) + + +@AWSRetry.jittered_backoff(**retry_params) +def describe_api(client, module, rest_api_id): + try: + response = client.get_rest_api(restApiId=rest_api_id) + response.pop("ResponseMetadata") + except is_boto3_error_code("ResourceNotFoundException"): + response = {} + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Trying to get Rest API '{rest_api_id}'.") + return response + + +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/api_gateway_domain.py b/ansible_collections/community/aws/plugins/modules/api_gateway_domain.py index 9b4ec8572..8ffbdaf20 100644 --- a/ansible_collections/community/aws/plugins/modules/api_gateway_domain.py +++ b/ansible_collections/community/aws/plugins/modules/api_gateway_domain.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: api_gateway_domain short_description: Manage AWS API Gateway custom domains @@ -57,17 +55,17 @@ options: default: present choices: [ 'present', 'absent' ] type: str -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 notes: - Does not create a DNS entry on Route53, for that use the M(community.aws.route53) module. - Only supports TLS certificates from AWS ACM that can just be referenced by the ARN, while the AWS API still offers (deprecated) options to add own Certificates. 
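All of the thin client wrappers in these apigateway modules share retry_params and are decorated with AWSRetry.jittered_backoff. In spirit the decorator behaves like the sketch below (a simplified assumption: the real implementation caps the sleep, applies full jitter, and only retries matching botocore error codes such as TooManyRequestsException):

import random
import time

def jittered_backoff(retries=10, delay=10):
    # Retry a callable with exponentially growing, jittered sleeps between
    # attempts; re-raise after the final attempt.
    def decorator(func):
        def wrapper(*args, **kwargs):
            for attempt in range(retries):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == retries - 1:
                        raise
                    time.sleep(random.uniform(0, delay * 2 ** attempt))
        return wrapper
    return decorator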
-''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Setup endpoint for a custom domain for your API Gateway HTTP API community.aws.api_gateway_domain: domain_name: myapi.foobar.com @@ -75,7 +73,8 @@ EXAMPLES = ''' security_policy: TLS_1_2 endpoint_type: EDGE domain_mappings: - - { rest_api_id: abc123, stage: production } + - rest_api_id: abc123 + stage: production state: present register: api_gw_domain_result @@ -88,9 +87,9 @@ EXAMPLES = ''' zone: foobar.com alias_hosted_zone_id: "{{ api_gw_domain_result.response.domain.distribution_hosted_zone_id }}" command: create -''' +""" -RETURN = ''' +RETURN = r""" response: description: The data returned by create_domain_name (or update and delete) and create_base_path_mapping methods by boto3. returned: success @@ -110,27 +109,33 @@ response: path_mappings: [ { base_path: '(empty)', rest_api_id: 'abcd123', stage: 'production' } ] -''' +""" + +import copy try: - from botocore.exceptions import ClientError, BotoCoreError, EndpointConnectionError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import EndpointConnectionError except ImportError: pass # caught by imported AnsibleAWSModule -import copy +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_domain(module, client): - domain_name = module.params.get('domain_name') + domain_name = module.params.get("domain_name") result = {} try: - result['domain'] = get_domain_name(client, domain_name) - result['path_mappings'] = get_domain_mappings(client, domain_name) - except is_boto3_error_code('NotFoundException'): + result["domain"] = get_domain_name(client, domain_name) + result["path_mappings"] = get_domain_mappings(client, domain_name) + except is_boto3_error_code("NotFoundException"): return None except (ClientError, BotoCoreError, EndpointConnectionError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="getting API GW domain") @@ -138,28 +143,28 @@ def get_domain(module, client): def create_domain(module, client): - path_mappings = module.params.get('domain_mappings', []) - domain_name = module.params.get('domain_name') - result = {'domain': {}, 'path_mappings': []} + path_mappings = module.params.get("domain_mappings", []) + domain_name = module.params.get("domain_name") + result = {"domain": {}, "path_mappings": []} try: - result['domain'] = create_domain_name( + result["domain"] = create_domain_name( module, client, domain_name, - module.params.get('certificate_arn'), - module.params.get('endpoint_type'), - module.params.get('security_policy') + module.params.get("certificate_arn"), + module.params.get("endpoint_type"), + module.params.get("security_policy"), ) for mapping in 
path_mappings: - base_path = mapping.get('base_path', '') - rest_api_id = mapping.get('rest_api_id') - stage = mapping.get('stage') + base_path = mapping.get("base_path", "") + rest_api_id = mapping.get("rest_api_id") + stage = mapping.get("stage") if rest_api_id is None or stage is None: - module.fail_json('Every domain mapping needs a rest_api_id and stage name') + module.fail_json("Every domain mapping needs a rest_api_id and stage name") - result['path_mappings'].append(add_domain_mapping(client, domain_name, base_path, rest_api_id, stage)) + result["path_mappings"].append(add_domain_mapping(client, domain_name, base_path, rest_api_id, stage)) except (ClientError, BotoCoreError, EndpointConnectionError) as e: module.fail_json_aws(e, msg="creating API GW domain") @@ -167,54 +172,56 @@ def update_domain(module, client, existing_domain): - domain_name = module.params.get('domain_name') + domain_name = module.params.get("domain_name") result = existing_domain - result['updated'] = False + result["updated"] = False - domain = existing_domain.get('domain') + domain = existing_domain.get("domain") # Compare only relevant set of domain arguments. # As get_domain_name gathers all kinds of state information that can't be set anyway. # Also this module doesn't support custom TLS cert setup params as they are kind of deprecated already and would increase complexity. existing_domain_settings = { - 'certificate_arn': domain.get('certificate_arn'), - 'security_policy': domain.get('security_policy'), - 'endpoint_type': domain.get('endpoint_configuration').get('types')[0] + "certificate_arn": domain.get("certificate_arn"), + "security_policy": domain.get("security_policy"), + "endpoint_type": domain.get("endpoint_configuration").get("types")[0], } specified_domain_settings = { - 'certificate_arn': module.params.get('certificate_arn'), - 'security_policy': module.params.get('security_policy'), - 'endpoint_type': module.params.get('endpoint_type') + "certificate_arn": module.params.get("certificate_arn"), + "security_policy": module.params.get("security_policy"), + "endpoint_type": module.params.get("endpoint_type"), } if specified_domain_settings != existing_domain_settings: try: - result['domain'] = update_domain_name(client, domain_name, **snake_dict_to_camel_dict(specified_domain_settings)) - result['updated'] = True + result["domain"] = update_domain_name( + client, domain_name, **snake_dict_to_camel_dict(specified_domain_settings) + ) + result["updated"] = True except (ClientError, BotoCoreError, EndpointConnectionError) as e: module.fail_json_aws(e, msg="updating API GW domain") - existing_mappings = copy.deepcopy(existing_domain.get('path_mappings', [])) + existing_mappings = copy.deepcopy(existing_domain.get("path_mappings", [])) # Clean out `base_path: "(none)"` elements from dicts as those won't match with specified mappings for mapping in existing_mappings: - if mapping.get('base_path', 'missing') == '(none)': - mapping.pop('base_path') + if mapping.get("base_path", "missing") == "(none)": + mapping.pop("base_path") - specified_mappings = copy.deepcopy(module.params.get('domain_mappings', [])) + specified_mappings = copy.deepcopy(module.params.get("domain_mappings", [])) # Clean out `base_path: ""` elements from dicts as those won't match with existing mappings for mapping in specified_mappings: - if mapping.get('base_path', 'missing') == '': - mapping.pop('base_path') + if mapping.get("base_path", "missing") == "": + mapping.pop("base_path") if 
specified_mappings != existing_mappings: try: # When lists mismatch, delete all existing mappings before adding new ones as specified - for mapping in existing_domain.get('path_mappings', []): - delete_domain_mapping(client, domain_name, mapping['base_path']) - for mapping in module.params.get('domain_mappings', []): - result['path_mappings'] = add_domain_mapping( - client, domain_name, mapping.get('base_path', ''), mapping.get('rest_api_id'), mapping.get('stage') + for mapping in existing_domain.get("path_mappings", []): + delete_domain_mapping(client, domain_name, mapping["base_path"]) + for mapping in module.params.get("domain_mappings", []): + result["path_mappings"] = add_domain_mapping( + client, domain_name, mapping.get("base_path", ""), mapping.get("rest_api_id"), mapping.get("stage") ) - result['updated'] = True + result["updated"] = True except (ClientError, BotoCoreError, EndpointConnectionError) as e: module.fail_json_aws(e, msg="updating API GW domain mapping") @@ -222,7 +229,7 @@ def update_domain(module, client, existing_domain): def delete_domain(module, client): - domain_name = module.params.get('domain_name') + domain_name = module.params.get("domain_name") try: result = delete_domain_name(client, domain_name) except (ClientError, BotoCoreError, EndpointConnectionError) as e: @@ -240,19 +247,19 @@ def get_domain_name(client, domain_name): @AWSRetry.jittered_backoff(**retry_params) def get_domain_mappings(client, domain_name): - return client.get_base_path_mappings(domainName=domain_name, limit=200).get('items', []) + return client.get_base_path_mappings(domainName=domain_name, limit=200).get("items", []) @AWSRetry.jittered_backoff(**retry_params) def create_domain_name(module, client, domain_name, certificate_arn, endpoint_type, security_policy): - endpoint_configuration = {'types': [endpoint_type]} + endpoint_configuration = {"types": [endpoint_type]} - if endpoint_type == 'EDGE': + if endpoint_type == "EDGE": return client.create_domain_name( domainName=domain_name, certificateArn=certificate_arn, endpointConfiguration=endpoint_configuration, - securityPolicy=security_policy + securityPolicy=security_policy, ) else: # Use regionalCertificateArn for regional domain deploys @@ -260,13 +267,15 @@ def create_domain_name(module, client, domain_name, certificate_arn, endpoint_ty domainName=domain_name, regionalCertificateArn=certificate_arn, endpointConfiguration=endpoint_configuration, - securityPolicy=security_policy + securityPolicy=security_policy, ) @AWSRetry.jittered_backoff(**retry_params) def add_domain_mapping(client, domain_name, base_path, rest_api_id, stage): - return client.create_base_path_mapping(domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage) + return client.create_base_path_mapping( + domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage + ) @AWSRetry.jittered_backoff(**retry_params) @@ -294,29 +303,29 @@ def delete_domain_mapping(client, domain_name, base_path): def main(): argument_spec = dict( - domain_name=dict(type='str', required=True), - certificate_arn=dict(type='str', 
required=True), + security_policy=dict(type="str", default="TLS_1_2", choices=["TLS_1_0", "TLS_1_2"]), + endpoint_type=dict(type="str", default="EDGE", choices=["EDGE", "REGIONAL", "PRIVATE"]), + domain_mappings=dict(type="list", required=True, elements="dict"), + state=dict(type="str", default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=False + supports_check_mode=False, ) - client = module.client('apigateway') + client = module.client("apigateway") - state = module.params.get('state') + state = module.params.get("state") changed = False if state == "present": existing_domain = get_domain(module, client) if existing_domain is not None: result = update_domain(module, client, existing_domain) - changed = result['updated'] + changed = result["updated"] else: result = create_domain(module, client) changed = True @@ -327,10 +336,10 @@ def main(): exit_args = {"changed": changed} if result is not None: - exit_args['response'] = result + exit_args["response"] = result module.exit_json(**exit_args) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/api_gateway_info.py b/ansible_collections/community/aws/plugins/modules/api_gateway_info.py new file mode 100644 index 000000000..fd38d795a --- /dev/null +++ b/ansible_collections/community/aws/plugins/modules/api_gateway_info.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: api_gateway_info +version_added: 6.1.0 +short_description: Gather information about API Gateway REST APIs in AWS +description: + - Gather information about API Gateway REST APIs in AWS +options: + ids: + description: + - The list of the string identifiers of the associated RestApis. + type: list + elements: str +author: + - Aubin Bikouo (@abikouo) +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +--- +# List all API gateways +- name: List all API gateways + community.aws.api_gateway_info: + +# Get information for a specific API gateway +- name: Get information about specific API gateways + community.aws.api_gateway_info: + ids: + - 012345678a + - abcdefghij +""" + +RETURN = r""" +--- +rest_apis: + description: A list of API gateways. + returned: always + type: complex + contains: + name: + description: The name of the API. + returned: success + type: str + sample: 'ansible-tmp-api' + id: + description: The identifier of the API. + returned: success + type: str + sample: 'abcdefgh' + api_key_source: + description: The source of the API key for metering requests according to a usage plan. + returned: success + type: str + sample: 'HEADER' + created_date: + description: The timestamp when the API was created. + returned: success + type: str + sample: "2020-01-01T11:37:59+00:00" + description: + description: The description of the API. + returned: success + type: str + sample: "Automatic deployment by Ansible." + disable_execute_api_endpoint: + description: Specifies whether clients can invoke your API by using the default execute-api endpoint. + returned: success + type: bool + sample: False + endpoint_configuration: + description: The endpoint configuration of this RestApi showing the endpoint types of the API. 
+ returned: success + type: dict + sample: {"types": ["REGIONAL"]} + tags: + description: The collection of tags. + returned: success + type: dict + sample: {"key": "value"} +""" + + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +@AWSRetry.jittered_backoff() +def _list_rest_apis(connection, **params): + paginator = connection.get_paginator("get_rest_apis") + return paginator.paginate(**params).build_full_result().get("items", []) + + +@AWSRetry.jittered_backoff() +def _describe_rest_api(connection, module, rest_api_id): + try: + response = connection.get_rest_api(restApiId=rest_api_id) + response.pop("ResponseMetadata") + except is_boto3_error_code("ResourceNotFoundException"): + response = {} + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Trying to get Rest API '{rest_api_id}'.") + return response + + +def main(): + argument_spec = dict( + ids=dict(type="list", elements="str"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + connection = module.client("apigateway") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + ids = module.params.get("ids") + if ids: + rest_apis = [] + for rest_api_id in ids: + result = _describe_rest_api(connection, module, rest_api_id) + if result: + rest_apis.append(result) + else: + rest_apis = _list_rest_apis(connection) + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_rest_apis = [camel_dict_to_snake_dict(item) for item in rest_apis] + module.exit_json(changed=False, rest_apis=snaked_rest_apis) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py b/ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py index d20c107de..beb2247ac 100644 --- a/ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py +++ b/ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: application_autoscaling_policy version_added: 1.0.0 @@ -104,12 +102,12 @@ options: required: false type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
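A note on the _list_rest_apis helper above: paginate().build_full_result() drains every page of get_rest_apis and merges the "items" lists. The manual equivalent looks roughly like this (sketch, assuming an already-constructed apigateway client):

def list_rest_apis(client):
    # Accumulate all pages, as build_full_result() does in a single call.
    apis = []
    for page in client.get_paginator("get_rest_apis").paginate():
        apis.extend(page.get("items", []))
    return apis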
# Create step scaling policy for ECS Service @@ -160,9 +158,9 @@ EXAMPLES = ''' service_namespace: ecs resource_id: service/cluster-name/service-name scalable_dimension: ecs:service:DesiredCount -''' +""" -RETURN = ''' +RETURN = r""" alarms: description: List of the CloudWatch alarms associated with the scaling policy returned: when state present @@ -283,27 +281,29 @@ creation_time: returned: when state present type: str sample: '2017-09-28T08:22:51.881000-03:00' -''' # NOQA - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict +""" try: import botocore except ImportError: pass # handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import _camel_to_snake +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + # Merge the results of the scalable target creation and policy deletion/creation # There's no risk in overriding values since mutual keys have the same values in our case def merge_results(scalable_target_result, policy_result): - if scalable_target_result['changed'] or policy_result['changed']: + if scalable_target_result["changed"] or policy_result["changed"]: changed = True else: changed = False - merged_response = scalable_target_result['response'].copy() - merged_response.update(policy_result['response']) + merged_response = scalable_target_result["response"].copy() + merged_response.update(policy_result["response"]) return {"changed": changed, "response": merged_response} @@ -312,22 +312,22 @@ def delete_scaling_policy(connection, module): changed = False try: scaling_policy = connection.describe_scaling_policies( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyNames=[module.params.get('policy_name')], - MaxResults=1 + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + PolicyNames=[module.params.get("policy_name")], + MaxResults=1, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scaling policies") - if scaling_policy['ScalingPolicies']: + if scaling_policy["ScalingPolicies"]: try: connection.delete_scaling_policy( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyName=module.params.get('policy_name'), + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + PolicyName=module.params.get("policy_name"), ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -341,11 +341,11 @@ def create_scalable_target(connection, module): try: scalable_targets = connection.describe_scalable_targets( - ServiceNamespace=module.params.get('service_namespace'), + ServiceNamespace=module.params.get("service_namespace"), ResourceIds=[ - module.params.get('resource_id'), + module.params.get("resource_id"), ], - ScalableDimension=module.params.get('scalable_dimension') + 
ScalableDimension=module.params.get("scalable_dimension"), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scalable targets") @@ -353,41 +353,38 @@ def create_scalable_target(connection, module): # Scalable target registration will occur if: # 1. There is no scalable target registered for this service # 2. A scalable target exists, different min/max values are defined and override is set to "yes" - if ( - not scalable_targets['ScalableTargets'] - or ( - module.params.get('override_task_capacity') - and ( - scalable_targets['ScalableTargets'][0]['MinCapacity'] != module.params.get('minimum_tasks') - or scalable_targets['ScalableTargets'][0]['MaxCapacity'] != module.params.get('maximum_tasks') - ) + if not scalable_targets["ScalableTargets"] or ( + module.params.get("override_task_capacity") + and ( + scalable_targets["ScalableTargets"][0]["MinCapacity"] != module.params.get("minimum_tasks") + or scalable_targets["ScalableTargets"][0]["MaxCapacity"] != module.params.get("maximum_tasks") ) ): changed = True try: connection.register_scalable_target( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - MinCapacity=module.params.get('minimum_tasks'), - MaxCapacity=module.params.get('maximum_tasks') + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + MinCapacity=module.params.get("minimum_tasks"), + MaxCapacity=module.params.get("maximum_tasks"), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to register scalable target") try: response = connection.describe_scalable_targets( - ServiceNamespace=module.params.get('service_namespace'), + ServiceNamespace=module.params.get("service_namespace"), ResourceIds=[ - module.params.get('resource_id'), + module.params.get("resource_id"), ], - ScalableDimension=module.params.get('scalable_dimension') + ScalableDimension=module.params.get("scalable_dimension"), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scalable targets") - if (response['ScalableTargets']): - snaked_response = camel_dict_to_snake_dict(response['ScalableTargets'][0]) + if response["ScalableTargets"]: + snaked_response = camel_dict_to_snake_dict(response["ScalableTargets"][0]) else: snaked_response = {} @@ -397,78 +394,82 @@ def create_scalable_target(connection, module): def create_scaling_policy(connection, module): try: scaling_policy = connection.describe_scaling_policies( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyNames=[module.params.get('policy_name')], - MaxResults=1 + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + PolicyNames=[module.params.get("policy_name")], + MaxResults=1, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scaling policies") changed = False - if scaling_policy['ScalingPolicies']: - scaling_policy = scaling_policy['ScalingPolicies'][0] + if 
scaling_policy["ScalingPolicies"]: + scaling_policy = scaling_policy["ScalingPolicies"][0] # check if the input parameters are equal to what's already configured - for attr in ('PolicyName', - 'ServiceNamespace', - 'ResourceId', - 'ScalableDimension', - 'PolicyType', - 'StepScalingPolicyConfiguration', - 'TargetTrackingScalingPolicyConfiguration'): + for attr in ( + "PolicyName", + "ServiceNamespace", + "ResourceId", + "ScalableDimension", + "PolicyType", + "StepScalingPolicyConfiguration", + "TargetTrackingScalingPolicyConfiguration", + ): if attr in scaling_policy and scaling_policy[attr] != module.params.get(_camel_to_snake(attr)): changed = True scaling_policy[attr] = module.params.get(_camel_to_snake(attr)) else: changed = True scaling_policy = { - 'PolicyName': module.params.get('policy_name'), - 'ServiceNamespace': module.params.get('service_namespace'), - 'ResourceId': module.params.get('resource_id'), - 'ScalableDimension': module.params.get('scalable_dimension'), - 'PolicyType': module.params.get('policy_type'), - 'StepScalingPolicyConfiguration': module.params.get('step_scaling_policy_configuration'), - 'TargetTrackingScalingPolicyConfiguration': module.params.get('target_tracking_scaling_policy_configuration') + "PolicyName": module.params.get("policy_name"), + "ServiceNamespace": module.params.get("service_namespace"), + "ResourceId": module.params.get("resource_id"), + "ScalableDimension": module.params.get("scalable_dimension"), + "PolicyType": module.params.get("policy_type"), + "StepScalingPolicyConfiguration": module.params.get("step_scaling_policy_configuration"), + "TargetTrackingScalingPolicyConfiguration": module.params.get( + "target_tracking_scaling_policy_configuration" + ), } if changed: try: - if (module.params.get('step_scaling_policy_configuration')): + if module.params.get("step_scaling_policy_configuration"): connection.put_scaling_policy( - PolicyName=scaling_policy['PolicyName'], - ServiceNamespace=scaling_policy['ServiceNamespace'], - ResourceId=scaling_policy['ResourceId'], - ScalableDimension=scaling_policy['ScalableDimension'], - PolicyType=scaling_policy['PolicyType'], - StepScalingPolicyConfiguration=scaling_policy['StepScalingPolicyConfiguration'] + PolicyName=scaling_policy["PolicyName"], + ServiceNamespace=scaling_policy["ServiceNamespace"], + ResourceId=scaling_policy["ResourceId"], + ScalableDimension=scaling_policy["ScalableDimension"], + PolicyType=scaling_policy["PolicyType"], + StepScalingPolicyConfiguration=scaling_policy["StepScalingPolicyConfiguration"], ) - elif (module.params.get('target_tracking_scaling_policy_configuration')): + elif module.params.get("target_tracking_scaling_policy_configuration"): connection.put_scaling_policy( - PolicyName=scaling_policy['PolicyName'], - ServiceNamespace=scaling_policy['ServiceNamespace'], - ResourceId=scaling_policy['ResourceId'], - ScalableDimension=scaling_policy['ScalableDimension'], - PolicyType=scaling_policy['PolicyType'], - TargetTrackingScalingPolicyConfiguration=scaling_policy['TargetTrackingScalingPolicyConfiguration'] + PolicyName=scaling_policy["PolicyName"], + ServiceNamespace=scaling_policy["ServiceNamespace"], + ResourceId=scaling_policy["ResourceId"], + ScalableDimension=scaling_policy["ScalableDimension"], + PolicyType=scaling_policy["PolicyType"], + TargetTrackingScalingPolicyConfiguration=scaling_policy["TargetTrackingScalingPolicyConfiguration"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create 
scaling policy") try: response = connection.describe_scaling_policies( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyNames=[module.params.get('policy_name')], - MaxResults=1 + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + PolicyNames=[module.params.get("policy_name")], + MaxResults=1, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scaling policies") - if (response['ScalingPolicies']): - snaked_response = camel_dict_to_snake_dict(response['ScalingPolicies'][0]) + if response["ScalingPolicies"]: + snaked_response = camel_dict_to_snake_dict(response["ScalingPolicies"][0]) else: snaked_response = {} @@ -477,52 +478,63 @@ def create_scaling_policy(connection, module): def main(): argument_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - policy_name=dict(type='str', required=True), - service_namespace=dict(type='str', required=True, choices=['appstream', 'dynamodb', 'ec2', 'ecs', 'elasticmapreduce']), - resource_id=dict(type='str', required=True), - scalable_dimension=dict(type='str', - required=True, - choices=['ecs:service:DesiredCount', - 'ec2:spot-fleet-request:TargetCapacity', - 'elasticmapreduce:instancegroup:InstanceCount', - 'appstream:fleet:DesiredCapacity', - 'dynamodb:table:ReadCapacityUnits', - 'dynamodb:table:WriteCapacityUnits', - 'dynamodb:index:ReadCapacityUnits', - 'dynamodb:index:WriteCapacityUnits']), - policy_type=dict(type='str', required=True, choices=['StepScaling', 'TargetTrackingScaling']), - step_scaling_policy_configuration=dict(type='dict'), + state=dict(type="str", required=True, choices=["present", "absent"]), + policy_name=dict(type="str", required=True), + service_namespace=dict( + type="str", required=True, choices=["appstream", "dynamodb", "ec2", "ecs", "elasticmapreduce"] + ), + resource_id=dict(type="str", required=True), + scalable_dimension=dict( + type="str", + required=True, + choices=[ + "ecs:service:DesiredCount", + "ec2:spot-fleet-request:TargetCapacity", + "elasticmapreduce:instancegroup:InstanceCount", + "appstream:fleet:DesiredCapacity", + "dynamodb:table:ReadCapacityUnits", + "dynamodb:table:WriteCapacityUnits", + "dynamodb:index:ReadCapacityUnits", + "dynamodb:index:WriteCapacityUnits", + ], + ), + policy_type=dict(type="str", required=True, choices=["StepScaling", "TargetTrackingScaling"]), + step_scaling_policy_configuration=dict(type="dict"), target_tracking_scaling_policy_configuration=dict( - type='dict', + type="dict", options=dict( - CustomizedMetricSpecification=dict(type='dict'), - DisableScaleIn=dict(type='bool'), - PredefinedMetricSpecification=dict(type='dict'), - ScaleInCooldown=dict(type='int'), - ScaleOutCooldown=dict(type='int'), - TargetValue=dict(type='float'), - ) + CustomizedMetricSpecification=dict(type="dict"), + DisableScaleIn=dict(type="bool"), + PredefinedMetricSpecification=dict(type="dict"), + ScaleInCooldown=dict(type="int"), + ScaleOutCooldown=dict(type="int"), + TargetValue=dict(type="float"), + ), ), - minimum_tasks=dict(type='int'), - maximum_tasks=dict(type='int'), - override_task_capacity=dict(type='bool'), + minimum_tasks=dict(type="int"), + maximum_tasks=dict(type="int"), + override_task_capacity=dict(type="bool"), ) module = 
AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('application-autoscaling') + connection = module.client("application-autoscaling") # Remove any target_tracking_scaling_policy_configuration suboptions that are None policy_config_options = [ - 'CustomizedMetricSpecification', 'DisableScaleIn', 'PredefinedMetricSpecification', 'ScaleInCooldown', 'ScaleOutCooldown', 'TargetValue' + "CustomizedMetricSpecification", + "DisableScaleIn", + "PredefinedMetricSpecification", + "ScaleInCooldown", + "ScaleOutCooldown", + "TargetValue", ] - if isinstance(module.params['target_tracking_scaling_policy_configuration'], dict): + if isinstance(module.params["target_tracking_scaling_policy_configuration"], dict): for option in policy_config_options: - if module.params['target_tracking_scaling_policy_configuration'][option] is None: - module.params['target_tracking_scaling_policy_configuration'].pop(option) + if module.params["target_tracking_scaling_policy_configuration"][option] is None: + module.params["target_tracking_scaling_policy_configuration"].pop(option) - if module.params.get("state") == 'present': + if module.params.get("state") == "present": # A scalable target must be registered prior to creating a scaling policy scalable_target_result = create_scalable_target(connection, module) policy_result = create_scaling_policy(connection, module) @@ -535,5 +547,5 @@ def main(): module.exit_json(**policy_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py b/ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py index 8f585a102..94a8d031f 100644 --- a/ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py +++ b/ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_complete_lifecycle_action short_description: Completes the lifecycle action of an instance @@ -37,12 +36,12 @@ options: type: str required: true extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
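For orientation, the four options documented above map one-to-one onto a single Auto Scaling API call. A minimal boto3 sketch, not the module code itself; the identifiers are hypothetical and credentials/region are assumed to be configured:

import boto3

# Hypothetical values; these correspond to the module's asg_name,
# lifecycle_hook_name, lifecycle_action_result and instance_id options.
client = boto3.client("autoscaling")
client.complete_lifecycle_action(
    LifecycleHookName="my-lifecycle-hook",
    AutoScalingGroupName="my-asg",
    LifecycleActionResult="CONTINUE",  # "ABANDON" aborts the transition instead
    InstanceId="i-123knm1l2312",
)

CONTINUE resumes the suspended launch or terminate flow; ABANDON terminates a launching instance outright.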
# Complete the lifecycle action - aws_asg_complete_lifecycle_action: @@ -50,47 +49,47 @@ EXAMPLES = ''' lifecycle_hook_name: my-lifecycle-hook lifecycle_action_result: CONTINUE instance_id: i-123knm1l2312 -''' +""" -RETURN = ''' +RETURN = r""" --- status: description: How things went returned: success type: str sample: ["OK"] -''' +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def main(): argument_spec = dict( - asg_name=dict(required=True, type='str'), - lifecycle_hook_name=dict(required=True, type='str'), - lifecycle_action_result=dict(required=True, type='str', choices=['CONTINUE', 'ABANDON']), - instance_id=dict(required=True, type='str') + asg_name=dict(required=True, type="str"), + lifecycle_hook_name=dict(required=True, type="str"), + lifecycle_action_result=dict(required=True, type="str", choices=["CONTINUE", "ABANDON"]), + instance_id=dict(required=True, type="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - asg_name = module.params.get('asg_name') - lifecycle_hook_name = module.params.get('lifecycle_hook_name') - lifecycle_action_result = module.params.get('lifecycle_action_result') - instance_id = module.params.get('instance_id') + asg_name = module.params.get("asg_name") + lifecycle_hook_name = module.params.get("lifecycle_hook_name") + lifecycle_action_result = module.params.get("lifecycle_action_result") + instance_id = module.params.get("instance_id") - autoscaling = module.client('autoscaling') + autoscaling = module.client("autoscaling") try: results = autoscaling.complete_lifecycle_action( LifecycleHookName=lifecycle_hook_name, AutoScalingGroupName=asg_name, LifecycleActionResult=lifecycle_action_result, - InstanceId=instance_id + InstanceId=instance_id, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to complete the lifecycle action") @@ -98,5 +97,5 @@ def main(): module.exit_json(results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py index 94c2bb38c..b301fea94 100644 --- a/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py +++ b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_instance_refresh version_added: 3.2.0 @@ -61,12 +59,12 @@ options: type: int type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details.
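The instance refresh logic reworked in the following hunks rests on three boto3 calls. A hedged standalone sketch under the same assumptions as above (hypothetical group name, configured credentials):

import boto3

client = boto3.client("autoscaling")
asg = "my-asg"  # hypothetical

# Start a rolling refresh; Preferences mirrors the module's preferences option.
refresh_id = client.start_instance_refresh(
    AutoScalingGroupName=asg,
    Strategy="Rolling",
    Preferences={"MinHealthyPercentage": 91, "InstanceWarmup": 60},
)["InstanceRefreshId"]

# Poll its status; the module snake-cases this response before returning it.
status = client.describe_instance_refreshes(
    AutoScalingGroupName=asg, InstanceRefreshIds=[refresh_id]
)["InstanceRefreshes"][0]["Status"]

# Cancel whichever refresh is currently in progress for the group.
client.cancel_instance_refresh(AutoScalingGroupName=asg)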
- name: Start a refresh @@ -86,10 +84,9 @@ EXAMPLES = ''' preferences: min_healthy_percentage: 91 instance_warmup: 60 +""" -''' - -RETURN = ''' +RETURN = r""" --- instance_refresh_id: description: instance refresh id @@ -137,20 +134,22 @@ instances_to_update: returned: success type: int sample: 5 -''' +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def start_or_cancel_instance_refresh(conn, module): """ @@ -179,75 +178,75 @@ def start_or_cancel_instance_refresh(conn, module): } """ - asg_state = module.params.get('state') - asg_name = module.params.get('name') - preferences = module.params.get('preferences') + asg_state = module.params.get("state") + asg_name = module.params.get("name") + preferences = module.params.get("preferences") args = {} - args['AutoScalingGroupName'] = asg_name - if asg_state == 'started': - args['Strategy'] = module.params.get('strategy') + args["AutoScalingGroupName"] = asg_name + if asg_state == "started": + args["Strategy"] = module.params.get("strategy") if preferences: - if asg_state == 'cancelled': - module.fail_json(msg='can not pass preferences dict when canceling a refresh') + if asg_state == "cancelled": + module.fail_json(msg="can not pass preferences dict when canceling a refresh") _prefs = scrub_none_parameters(preferences) - args['Preferences'] = snake_dict_to_camel_dict(_prefs, capitalize_first=True) + args["Preferences"] = snake_dict_to_camel_dict(_prefs, capitalize_first=True) cmd_invocations = { - 'cancelled': conn.cancel_instance_refresh, - 'started': conn.start_instance_refresh, + "cancelled": conn.cancel_instance_refresh, + "started": conn.start_instance_refresh, } try: if module.check_mode: - if asg_state == 'started': - ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get('InstanceRefreshes', '[]') + if asg_state == "started": + ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get( + "InstanceRefreshes", "[]" + ) if ongoing_refresh: - module.exit_json(changed=False, msg='In check_mode - Instance Refresh is already in progress, can not start new instance refresh.') + module.exit_json( + changed=False, + msg="In check_mode - Instance Refresh is already in progress, can not start new instance refresh.", + ) else: - module.exit_json(changed=True, msg='Would have started instance refresh if not in check mode.') - elif asg_state == 'cancelled': - ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get('InstanceRefreshes', '[]')[0] - if ongoing_refresh.get('Status', 
'') in ['Cancelling', 'Cancelled']: - module.exit_json(changed=False, msg='In check_mode - Instance Refresh already cancelled or is pending cancellation.') + module.exit_json(changed=True, msg="Would have started instance refresh if not in check mode.") + elif asg_state == "cancelled": + ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get( + "InstanceRefreshes", "[]" + )[0] + if ongoing_refresh.get("Status", "") in ["Cancelling", "Cancelled"]: + module.exit_json( + changed=False, + msg="In check_mode - Instance Refresh already cancelled or is pending cancellation.", + ) elif not ongoing_refresh: - module.exit_json(changed=False, msg='In check_mode - No active refresh found, nothing to cancel.') + module.exit_json(changed=False, msg="In check_mode - No active refresh found, nothing to cancel.") else: - module.exit_json(changed=True, msg='Would have cancelled instance refresh if not in check mode.') + module.exit_json(changed=True, msg="Would have cancelled instance refresh if not in check mode.") result = cmd_invocations[asg_state](aws_retry=True, **args) - instance_refreshes = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name, InstanceRefreshIds=[result['InstanceRefreshId']]) - result = dict( - instance_refreshes=camel_dict_to_snake_dict(instance_refreshes['InstanceRefreshes'][0]) + instance_refreshes = conn.describe_instance_refreshes( + AutoScalingGroupName=asg_name, InstanceRefreshIds=[result["InstanceRefreshId"]] ) + result = dict(instance_refreshes=camel_dict_to_snake_dict(instance_refreshes["InstanceRefreshes"][0])) return module.exit_json(**result) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, - msg='Failed to {0} InstanceRefresh'.format( - asg_state.replace('ed', '') - ) - ) + module.fail_json_aws(e, msg=f"Failed to {asg_state.replace('ed', '')} InstanceRefresh") def main(): - argument_spec = dict( state=dict( - type='str', + type="str", required=True, - choices=['started', 'cancelled'], + choices=["started", "cancelled"], ), name=dict(required=True), - strategy=dict( - type='str', - default='Rolling', - required=False - ), + strategy=dict(type="str", default="Rolling", required=False), preferences=dict( - type='dict', + type="dict", required=False, options=dict( - min_healthy_percentage=dict(type='int', default=90), - instance_warmup=dict(type='int'), - ) + min_healthy_percentage=dict(type="int", default=90), + instance_warmup=dict(type="int"), + ), ), ) @@ -256,15 +255,12 @@ def main(): supports_check_mode=True, ) autoscaling = module.client( - 'autoscaling', - retry_decorator=AWSRetry.jittered_backoff( - retries=10, - catch_extra_error_codes=['InstanceRefreshInProgress'] - ) + "autoscaling", + retry_decorator=AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=["InstanceRefreshInProgress"]), ) start_or_cancel_instance_refresh(autoscaling, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py index 3037d0b52..639940b1b 100644 --- a/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py +++ b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py @@ -1,14 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from
__future__ import absolute_import, division, print_function - - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_instance_refresh_info version_added: 3.2.0 @@ -18,7 +14,8 @@ description: - You can determine the status of a request by looking at the I(status) parameter. - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_instance_refresh_info). The usage did not change. -author: "Dan Khersonsky (@danquixote)" +author: + - "Dan Khersonsky (@danquixote)" options: name: description: @@ -41,12 +38,12 @@ options: type: int required: false extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Find a refresh by ASG name @@ -70,9 +67,9 @@ EXAMPLES = ''' name: somename-asg next_token: 'some-token-123' register: asgs -''' +""" -RETURN = ''' +RETURN = r""" --- instance_refresh_id: description: instance refresh id @@ -120,16 +117,19 @@ instances_to_update: returned: success type: int sample: 5 -''' +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def find_asg_instance_refreshes(conn, module): @@ -158,51 +158,51 @@ ], 'next_token': 'string' } - """ + """ - asg_name = module.params.get('name') - asg_ids = module.params.get('ids') - asg_next_token = module.params.get('next_token') - asg_max_records = module.params.get('max_records') + asg_name = module.params.get("name") + asg_ids = module.params.get("ids") + asg_next_token = module.params.get("next_token") + asg_max_records = module.params.get("max_records") args = {} - args['AutoScalingGroupName'] = asg_name + args["AutoScalingGroupName"] = asg_name if asg_ids: - args['InstanceRefreshIds'] = asg_ids + args["InstanceRefreshIds"] = asg_ids if asg_next_token: - args['NextToken'] = asg_next_token + args["NextToken"] = asg_next_token if asg_max_records: - args['MaxRecords'] = asg_max_records + args["MaxRecords"] = asg_max_records try: instance_refreshes_result = {} response = conn.describe_instance_refreshes(**args) - if 'InstanceRefreshes' in response: + if "InstanceRefreshes" in response: instance_refreshes_dict = dict( - instance_refreshes=response['InstanceRefreshes'], next_token=response.get('next_token', '')) - instance_refreshes_result = camel_dict_to_snake_dict( - instance_refreshes_dict) + instance_refreshes=response["InstanceRefreshes"], next_token=response.get("next_token", "") + ) + instance_refreshes_result = camel_dict_to_snake_dict(instance_refreshes_dict) - while 'NextToken' in response: - args['NextToken'] = response['NextToken'] + while "NextToken" in response: + args["NextToken"] =
response["NextToken"] response = conn.describe_instance_refreshes(**args) - if 'InstanceRefreshes' in response: - instance_refreshes_dict = camel_dict_to_snake_dict(dict( - instance_refreshes=response['InstanceRefreshes'], next_token=response.get('next_token', ''))) + if "InstanceRefreshes" in response: + instance_refreshes_dict = camel_dict_to_snake_dict( + dict(instance_refreshes=response["InstanceRefreshes"], next_token=response.get("next_token", "")) + ) instance_refreshes_result.update(instance_refreshes_dict) return module.exit_json(**instance_refreshes_result) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to describe InstanceRefreshes') + module.fail_json_aws(e, msg="Failed to describe InstanceRefreshes") def main(): - argument_spec = dict( - name=dict(required=True, type='str'), - ids=dict(required=False, default=[], elements='str', type='list'), - next_token=dict(required=False, default=None, type='str', no_log=True), - max_records=dict(required=False, type='int'), + name=dict(required=True, type="str"), + ids=dict(required=False, default=[], elements="str", type="list"), + next_token=dict(required=False, default=None, type="str", no_log=True), + max_records=dict(required=False, type="int"), ) module = AnsibleAWSModule( @@ -210,12 +210,9 @@ def main(): supports_check_mode=True, ) - autoscaling = module.client( - 'autoscaling', - retry_decorator=AWSRetry.jittered_backoff(retries=10) - ) + autoscaling = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff(retries=10)) find_asg_instance_refreshes(autoscaling, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py index 1b13d1027..78b7ee233 100644 --- a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py +++ b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py @@ -1,13 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: autoscaling_launch_config version_added: 1.0.0 @@ -183,80 +180,86 @@ options: type: str choices: ['default', 'dedicated'] extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: create a launch configuration with an encrypted volume community.aws.autoscaling_launch_config: name: special image_id: ami-XXX key_name: default - security_groups: ['group', 'group2' ] + security_groups: + - 'group' + - 'group2' instance_type: t1.micro volumes: - - device_name: /dev/sda1 - volume_size: 100 - volume_type: io1 - iops: 3000 - delete_on_termination: true - encrypted: true - - device_name: /dev/sdb - ephemeral: ephemeral0 + - device_name: /dev/sda1 + volume_size: 100 + volume_type: io1 + iops: 3000 + delete_on_termination: true + encrypted: true + - device_name: /dev/sdb + ephemeral: ephemeral0 - name: create a launch configuration using a running instance id as a basis 
community.aws.autoscaling_launch_config: name: special instance_id: i-00a48b207ec59e948 key_name: default - security_groups: ['launch-wizard-2' ] + security_groups: + - 'launch-wizard-2' volumes: - - device_name: /dev/sda1 - volume_size: 120 - volume_type: io1 - iops: 3000 - delete_on_termination: true + - device_name: /dev/sda1 + volume_size: 120 + volume_type: io1 + iops: 3000 + delete_on_termination: true - name: create a launch configuration to omit the /dev/sdf EBS device that is included in the AMI image community.aws.autoscaling_launch_config: name: special image_id: ami-XXX key_name: default - security_groups: ['group', 'group2' ] + security_groups: + - 'group' + - 'group2' instance_type: t1.micro volumes: - - device_name: /dev/sdf - no_device: true + - device_name: /dev/sdf + no_device: true - name: Use EBS snapshot ID for volume block: - - name: Set Volume Facts - ansible.builtin.set_fact: - volumes: - - device_name: /dev/sda1 - volume_size: 20 - ebs: - snapshot: snap-XXXX - volume_type: gp2 - delete_on_termination: true - encrypted: false - - - name: Create launch configuration - community.aws.autoscaling_launch_config: - name: lc1 - image_id: ami-xxxx - assign_public_ip: true - instance_type: t2.medium - key_name: my-key - security_groups: "['sg-xxxx']" - volumes: "{{ volumes }}" - register: lc_info -''' - -RETURN = r''' + - name: Set Volume Facts + ansible.builtin.set_fact: + volumes: + - device_name: /dev/sda1 + volume_size: 20 + ebs: + snapshot: snap-XXXX + volume_type: gp2 + delete_on_termination: true + encrypted: false + + - name: Create launch configuration + community.aws.autoscaling_launch_config: + name: lc1 + image_id: ami-xxxx + assign_public_ip: true + instance_type: t2.medium + key_name: my-key + security_groups: + - 'sg-xxxx' + volumes: "{{ volumes }}" + register: lc_info +""" + +RETURN = r""" arn: description: The Amazon Resource Name of the launch configuration. 
returned: when I(state=present) @@ -440,7 +443,7 @@ security_groups: type: list sample: - sg-5e27db2f -''' +""" import traceback @@ -454,181 +457,220 @@ from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def create_block_device_meta(module, volume): - if 'snapshot' not in volume and 'ephemeral' not in volume and 'no_device' not in volume: - if 'volume_size' not in volume: - module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume') - if 'snapshot' in volume: - if volume.get('volume_type') == 'io1' and 'iops' not in volume: - module.fail_json(msg='io1 volumes must have an iops value set') - if 'ephemeral' in volume: - if 'snapshot' in volume: - module.fail_json(msg='Cannot set both ephemeral and snapshot') + if "snapshot" not in volume and "ephemeral" not in volume and "no_device" not in volume: + if "volume_size" not in volume: + module.fail_json(msg="Size must be specified when creating a new volume or modifying the root volume") + if "snapshot" in volume: + if volume.get("volume_type") == "io1" and "iops" not in volume: + module.fail_json(msg="io1 volumes must have an iops value set") + if "ephemeral" in volume: + if "snapshot" in volume: + module.fail_json(msg="Cannot set both ephemeral and snapshot") return_object = {} - if 'ephemeral' in volume: - return_object['VirtualName'] = volume.get('ephemeral') + if "ephemeral" in volume: + return_object["VirtualName"] = volume.get("ephemeral") - if 'device_name' in volume: - return_object['DeviceName'] = volume.get('device_name') + if "device_name" in volume: + return_object["DeviceName"] = volume.get("device_name") - if 'no_device' in volume: - return_object['NoDevice'] = volume.get('no_device') + if "no_device" in volume: + return_object["NoDevice"] = volume.get("no_device") - if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'throughput', 'encrypted']): - return_object['Ebs'] = {} + if any( + key in volume + for key in [ + "snapshot", + "volume_size", + "volume_type", + "delete_on_termination", + "iops", + "throughput", + "encrypted", + ] + ): + return_object["Ebs"] = {} - if 'snapshot' in volume: - return_object['Ebs']['SnapshotId'] = volume.get('snapshot') + if "snapshot" in volume: + return_object["Ebs"]["SnapshotId"] = volume.get("snapshot") - if 'volume_size' in volume: - return_object['Ebs']['VolumeSize'] = int(volume.get('volume_size', 0)) + if "volume_size" in volume: + return_object["Ebs"]["VolumeSize"] = int(volume.get("volume_size", 0)) - if 'volume_type' in volume: - return_object['Ebs']['VolumeType'] = volume.get('volume_type') + if "volume_type" in volume: + return_object["Ebs"]["VolumeType"] = volume.get("volume_type") - if 'delete_on_termination' in volume: - return_object['Ebs']['DeleteOnTermination'] = volume.get('delete_on_termination', False) + if "delete_on_termination" in volume: + return_object["Ebs"]["DeleteOnTermination"] = volume.get("delete_on_termination", False) - if 'iops' in volume: - return_object['Ebs']['Iops'] = volume.get('iops') 
+ if "iops" in volume: + return_object["Ebs"]["Iops"] = volume.get("iops") - if 'throughput' in volume: - if volume.get('volume_type') != 'gp3': - module.fail_json(msg='The throughput parameter is supported only for GP3 volumes.') - return_object['Ebs']['Throughput'] = volume.get('throughput') + if "throughput" in volume: + if volume.get("volume_type") != "gp3": + module.fail_json(msg="The throughput parameter is supported only for GP3 volumes.") + return_object["Ebs"]["Throughput"] = volume.get("throughput") - if 'encrypted' in volume: - return_object['Ebs']['Encrypted'] = volume.get('encrypted') + if "encrypted" in volume: + return_object["Ebs"]["Encrypted"] = volume.get("encrypted") return return_object def create_launch_config(connection, module): - name = module.params.get('name') - vpc_id = module.params.get('vpc_id') + name = module.params.get("name") + vpc_id = module.params.get("vpc_id") try: - ec2_connection = module.client('ec2') + ec2_connection = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") try: - security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), ec2_connection, vpc_id=vpc_id, boto3=True) + security_groups = get_ec2_security_group_ids_from_names( + module.params.get("security_groups"), ec2_connection, vpc_id=vpc_id, boto3=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to get Security Group IDs') + module.fail_json_aws(e, msg="Failed to get Security Group IDs") except ValueError as e: module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc()) - user_data = module.params.get('user_data') - user_data_path = module.params.get('user_data_path') - volumes = module.params['volumes'] - instance_monitoring = module.params.get('instance_monitoring') - assign_public_ip = module.params.get('assign_public_ip') - instance_profile_name = module.params.get('instance_profile_name') - ebs_optimized = module.params.get('ebs_optimized') - classic_link_vpc_id = module.params.get('classic_link_vpc_id') - classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups') + user_data = module.params.get("user_data") + user_data_path = module.params.get("user_data_path") + volumes = module.params["volumes"] + instance_monitoring = module.params.get("instance_monitoring") + assign_public_ip = module.params.get("assign_public_ip") + instance_profile_name = module.params.get("instance_profile_name") + ebs_optimized = module.params.get("ebs_optimized") + classic_link_vpc_id = module.params.get("classic_link_vpc_id") + classic_link_vpc_security_groups = module.params.get("classic_link_vpc_security_groups") block_device_mapping = [] - convert_list = ['image_id', 'instance_type', 'instance_type', 'instance_id', 'placement_tenancy', 'key_name', 'kernel_id', 'ramdisk_id', 'spot_price'] - - launch_config = (snake_dict_to_camel_dict(dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list))) + convert_list = [ + "image_id", + "instance_type", + "instance_type", + "instance_id", + "placement_tenancy", + "key_name", + "kernel_id", + "ramdisk_id", + "spot_price", + ] + + launch_config = snake_dict_to_camel_dict( + dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list) + ) if 
user_data_path: try: - with open(user_data_path, 'r') as user_data_file: + with open(user_data_path, "r") as user_data_file: user_data = user_data_file.read() except IOError as e: module.fail_json(msg="Failed to open file for reading", exception=traceback.format_exc()) if volumes: for volume in volumes: - if 'device_name' not in volume: - module.fail_json(msg='Device name must be set for volume') + if "device_name" not in volume: + module.fail_json(msg="Device name must be set for volume") # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0 to be a signal not to create this volume - if 'volume_size' not in volume or int(volume['volume_size']) > 0: + if "volume_size" not in volume or int(volume["volume_size"]) > 0: block_device_mapping.append(create_block_device_meta(module, volume)) try: - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') + launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( + "LaunchConfigurations" + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe launch configuration by name") changed = False result = {} - launch_config['LaunchConfigurationName'] = name + launch_config["LaunchConfigurationName"] = name if security_groups is not None: - launch_config['SecurityGroups'] = security_groups + launch_config["SecurityGroups"] = security_groups if classic_link_vpc_id is not None: - launch_config['ClassicLinkVPCId'] = classic_link_vpc_id + launch_config["ClassicLinkVPCId"] = classic_link_vpc_id if instance_monitoring is not None: - launch_config['InstanceMonitoring'] = {'Enabled': instance_monitoring} + launch_config["InstanceMonitoring"] = {"Enabled": instance_monitoring} if classic_link_vpc_security_groups is not None: - launch_config['ClassicLinkVPCSecurityGroups'] = classic_link_vpc_security_groups + launch_config["ClassicLinkVPCSecurityGroups"] = classic_link_vpc_security_groups if block_device_mapping: - launch_config['BlockDeviceMappings'] = block_device_mapping + launch_config["BlockDeviceMappings"] = block_device_mapping if instance_profile_name is not None: - launch_config['IamInstanceProfile'] = instance_profile_name + launch_config["IamInstanceProfile"] = instance_profile_name if assign_public_ip is not None: - launch_config['AssociatePublicIpAddress'] = assign_public_ip + launch_config["AssociatePublicIpAddress"] = assign_public_ip if user_data is not None: - launch_config['UserData'] = user_data + launch_config["UserData"] = user_data if ebs_optimized is not None: - launch_config['EbsOptimized'] = ebs_optimized + launch_config["EbsOptimized"] = ebs_optimized if len(launch_configs) == 0: try: connection.create_launch_configuration(**launch_config) - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') + launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( + "LaunchConfigurations" + ) changed = True if launch_configs: launch_config = launch_configs[0] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create launch configuration") - result = (dict((k, v) for k, v in launch_config.items() - if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings'])) + result = dict( + (k, v) + for k, v in launch_config.items() + if k not in ["Connection", 
"CreatedTime", "InstanceMonitoring", "BlockDeviceMappings"] + ) - result['CreatedTime'] = to_text(launch_config.get('CreatedTime')) + result["CreatedTime"] = to_text(launch_config.get("CreatedTime")) try: - result['InstanceMonitoring'] = module.boolean(launch_config.get('InstanceMonitoring').get('Enabled')) + result["InstanceMonitoring"] = module.boolean(launch_config.get("InstanceMonitoring").get("Enabled")) except AttributeError: - result['InstanceMonitoring'] = False - - result['BlockDeviceMappings'] = [] - - for block_device_mapping in launch_config.get('BlockDeviceMappings', []): - result['BlockDeviceMappings'].append(dict(device_name=block_device_mapping.get('DeviceName'), virtual_name=block_device_mapping.get('VirtualName'))) - if block_device_mapping.get('Ebs') is not None: - result['BlockDeviceMappings'][-1]['ebs'] = dict( - snapshot_id=block_device_mapping.get('Ebs').get('SnapshotId'), volume_size=block_device_mapping.get('Ebs').get('VolumeSize')) + result["InstanceMonitoring"] = False + + result["BlockDeviceMappings"] = [] + + for block_device_mapping in launch_config.get("BlockDeviceMappings", []): + result["BlockDeviceMappings"].append( + dict( + device_name=block_device_mapping.get("DeviceName"), virtual_name=block_device_mapping.get("VirtualName") + ) + ) + if block_device_mapping.get("Ebs") is not None: + result["BlockDeviceMappings"][-1]["ebs"] = dict( + snapshot_id=block_device_mapping.get("Ebs").get("SnapshotId"), + volume_size=block_device_mapping.get("Ebs").get("VolumeSize"), + ) if user_data_path: - result['UserData'] = "hidden" # Otherwise, we dump binary to the user's terminal + result["UserData"] = "hidden" # Otherwise, we dump binary to the user's terminal return_object = { - 'Name': result.get('LaunchConfigurationName'), - 'CreatedTime': result.get('CreatedTime'), - 'ImageId': result.get('ImageId'), - 'Arn': result.get('LaunchConfigurationARN'), - 'SecurityGroups': result.get('SecurityGroups'), - 'InstanceType': result.get('InstanceType'), - 'Result': result + "Name": result.get("LaunchConfigurationName"), + "CreatedTime": result.get("CreatedTime"), + "ImageId": result.get("ImageId"), + "Arn": result.get("LaunchConfigurationARN"), + "SecurityGroups": result.get("SecurityGroups"), + "InstanceType": result.get("InstanceType"), + "Result": result, } module.exit_json(changed=changed, **camel_dict_to_snake_dict(return_object)) @@ -636,10 +678,14 @@ def create_launch_config(connection, module): def delete_launch_config(connection, module): try: - name = module.params.get('name') - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') + name = module.params.get("name") + launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( + "LaunchConfigurations" + ) if launch_configs: - connection.delete_launch_configuration(LaunchConfigurationName=launch_configs[0].get('LaunchConfigurationName')) + connection.delete_launch_configuration( + LaunchConfigurationName=launch_configs[0].get("LaunchConfigurationName") + ) module.exit_json(changed=True) else: module.exit_json(changed=False) @@ -653,42 +699,42 @@ def main(): image_id=dict(), instance_id=dict(), key_name=dict(), - security_groups=dict(default=[], type='list', elements='str'), + security_groups=dict(default=[], type="list", elements="str"), user_data=dict(), - user_data_path=dict(type='path'), + user_data_path=dict(type="path"), kernel_id=dict(), - volumes=dict(type='list', elements='dict'), + volumes=dict(type="list", 
elements="dict"), instance_type=dict(), - state=dict(default='present', choices=['present', 'absent']), - spot_price=dict(type='float'), + state=dict(default="present", choices=["present", "absent"]), + spot_price=dict(type="float"), ramdisk_id=dict(), instance_profile_name=dict(), - ebs_optimized=dict(default=False, type='bool'), - instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(type='bool'), - classic_link_vpc_security_groups=dict(type='list', elements='str'), + ebs_optimized=dict(default=False, type="bool"), + instance_monitoring=dict(default=False, type="bool"), + assign_public_ip=dict(type="bool"), + classic_link_vpc_security_groups=dict(type="list", elements="str"), classic_link_vpc_id=dict(), vpc_id=dict(), - placement_tenancy=dict(choices=['default', 'dedicated']) + placement_tenancy=dict(choices=["default", "dedicated"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['user_data', 'user_data_path']], + mutually_exclusive=[["user_data", "user_data_path"]], ) try: - connection = module.client('autoscaling') + connection = module.client("autoscaling") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="unable to establish connection") - state = module.params.get('state') + state = module.params.get("state") - if state == 'present': + if state == "present": create_launch_config(connection, module) - elif state == 'absent': + elif state == "absent": delete_launch_config(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py index ae8f187c0..037c21ed9 100644 --- a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py +++ b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py @@ -1,14 +1,10 @@ #!/usr/bin/python -# encoding: utf-8 +# -*- coding: utf-8 -*- # (c) 2015, Jose Armesto <jose@armesto.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_launch_config_find version_added: 1.0.0 @@ -40,12 +36,12 @@ options: - Corresponds to Python slice notation like list[:limit]. type: int extends_documentation_fragment: - - amazon.aws.ec2 - - amazon.aws.aws + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Search for the Launch Configurations that start with "app" @@ -53,9 +49,9 @@ EXAMPLES = ''' name_regex: app.* sort_order: descending limit: 2 -''' +""" -RETURN = ''' +RETURN = r""" image_id: description: AMI id returned: when Launch Configuration was found @@ -132,7 +128,8 @@ associate_public_address: type: bool sample: True ... 
-''' +""" + import re try: @@ -140,54 +137,50 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def find_launch_configs(client, module): - name_regex = module.params.get('name_regex') - sort_order = module.params.get('sort_order') - limit = module.params.get('limit') + name_regex = module.params.get("name_regex") + sort_order = module.params.get("sort_order") + limit = module.params.get("limit") - paginator = client.get_paginator('describe_launch_configurations') + paginator = client.get_paginator("describe_launch_configurations") - response_iterator = paginator.paginate( - PaginationConfig={ - 'MaxItems': 1000, - 'PageSize': 100 - } - ) + response_iterator = paginator.paginate(PaginationConfig={"MaxItems": 1000, "PageSize": 100}) results = [] for response in response_iterator: - response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']), - response['LaunchConfigurations']) + response["LaunchConfigurations"] = filter( + lambda lc: re.compile(name_regex).match(lc["LaunchConfigurationName"]), response["LaunchConfigurations"] + ) - for lc in response['LaunchConfigurations']: + for lc in response["LaunchConfigurations"]: data = { - 'name': lc['LaunchConfigurationName'], - 'arn': lc['LaunchConfigurationARN'], - 'created_time': lc['CreatedTime'], - 'user_data': lc['UserData'], - 'instance_type': lc['InstanceType'], - 'image_id': lc['ImageId'], - 'ebs_optimized': lc['EbsOptimized'], - 'instance_monitoring': lc['InstanceMonitoring'], - 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'], - 'block_device_mappings': lc['BlockDeviceMappings'], - 'keyname': lc['KeyName'], - 'security_groups': lc['SecurityGroups'], - 'kernel_id': lc['KernelId'], - 'ram_disk_id': lc['RamdiskId'], - 'associate_public_address': lc.get('AssociatePublicIpAddress', False), + "name": lc["LaunchConfigurationName"], + "arn": lc["LaunchConfigurationARN"], + "created_time": lc["CreatedTime"], + "user_data": lc["UserData"], + "instance_type": lc["InstanceType"], + "image_id": lc["ImageId"], + "ebs_optimized": lc["EbsOptimized"], + "instance_monitoring": lc["InstanceMonitoring"], + "classic_link_vpc_security_groups": lc["ClassicLinkVPCSecurityGroups"], + "block_device_mappings": lc["BlockDeviceMappings"], + "keyname": lc["KeyName"], + "security_groups": lc["SecurityGroups"], + "kernel_id": lc["KernelId"], + "ram_disk_id": lc["RamdiskId"], + "associate_public_address": lc.get("AssociatePublicIpAddress", False), } results.append(data) - results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending')) + results.sort(key=lambda e: e["name"], reverse=(sort_order == "descending")) if limit: - results = results[:int(limit)] + results = results[:int(limit)] # fmt: skip module.exit_json(changed=False, results=results) @@ -195,8 +188,8 @@ def find_launch_configs(client, module): def main(): argument_spec = dict( name_regex=dict(required=True), - sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']), - limit=dict(required=False, type='int'), + sort_order=dict(required=False, default="ascending", choices=["ascending", "descending"]), + limit=dict(required=False, type="int"), ) module = AnsibleAWSModule( @@ -204,12 +197,12 @@ def main(): ) try: - client = module.client('autoscaling') + client = 
module.client("autoscaling") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") find_launch_configs(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py index 1c98d7588..f5123c2ef 100644 --- a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py +++ b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: autoscaling_launch_config_info version_added: 1.0.0 @@ -48,12 +45,12 @@ options: - Corresponds to Python slice notation. type: int extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all launch configurations @@ -67,9 +64,9 @@ EXAMPLES = r''' community.aws.autoscaling_launch_config_info: sort: created_time sort_order: descending -''' +""" -RETURN = r''' +RETURN = r""" block_device_mapping: description: Block device mapping for the instances of launch configuration. type: list @@ -149,43 +146,41 @@ user_data: description: User data available. 
type: str returned: always -''' +""" try: import botocore - from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def list_launch_configs(connection, module): - launch_config_name = module.params.get("name") - sort = module.params.get('sort') - sort_order = module.params.get('sort_order') - sort_start = module.params.get('sort_start') - sort_end = module.params.get('sort_end') + sort = module.params.get("sort") + sort_order = module.params.get("sort_order") + sort_start = module.params.get("sort_start") + sort_end = module.params.get("sort_end") try: - pg = connection.get_paginator('describe_launch_configurations') + pg = connection.get_paginator("describe_launch_configurations") launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result() - except ClientError as e: + except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Failed to list launch configs") snaked_launch_configs = [] - for launch_config in launch_configs['LaunchConfigurations']: + for launch_config in launch_configs["LaunchConfigurations"]: snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config)) for launch_config in snaked_launch_configs: - if 'CreatedTime' in launch_config: - launch_config['CreatedTime'] = str(launch_config['CreatedTime']) + if "CreatedTime" in launch_config: + launch_config["CreatedTime"] = str(launch_config["CreatedTime"]) if sort: - snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending')) + snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == "descending")) if sort and sort_start and sort_end: snaked_launch_configs = snaked_launch_configs[sort_start:sort_end] @@ -199,13 +194,23 @@ def list_launch_configs(connection, module): def main(): argument_spec = dict( - name=dict(required=False, default=[], type='list', elements='str'), - sort=dict(required=False, default=None, - choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']), - sort_order=dict(required=False, default='ascending', - choices=['ascending', 'descending']), - sort_start=dict(required=False, type='int'), - sort_end=dict(required=False, type='int'), + name=dict(required=False, default=[], type="list", elements="str"), + sort=dict( + required=False, + default=None, + choices=[ + "launch_configuration_name", + "image_id", + "created_time", + "instance_type", + "kernel_id", + "ramdisk_id", + "key_name", + ], + ), + sort_order=dict(required=False, default="ascending", choices=["ascending", "descending"]), + sort_start=dict(required=False, type="int"), + sort_end=dict(required=False, type="int"), ) module = AnsibleAWSModule( @@ -214,12 +219,12 @@ def main(): ) try: - connection = module.client('autoscaling') + connection = module.client("autoscaling") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_launch_configs(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py 
b/ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py index cf07b7681..a77fcce0a 100644 --- a/ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py +++ b/ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_lifecycle_hook version_added: 1.0.0 @@ -74,12 +71,12 @@ options: default: ABANDON type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create / Update lifecycle hook community.aws.autoscaling_lifecycle_hook: region: eu-central-1 @@ -96,9 +93,9 @@ EXAMPLES = ''' state: absent autoscaling_group_name: example lifecycle_hook_name: example -''' +""" -RETURN = ''' +RETURN = r""" --- auto_scaling_group_name: description: The unique name of the auto scaling group. @@ -130,7 +127,7 @@ lifecycle_transition: returned: success type: str sample: "autoscaling:EC2_INSTANCE_LAUNCHING" -''' +""" try: @@ -138,61 +135,64 @@ try: except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -def create_lifecycle_hook(connection, module): - lch_name = module.params.get('lifecycle_hook_name') - asg_name = module.params.get('autoscaling_group_name') - transition = module.params.get('transition') - role_arn = module.params.get('role_arn') - notification_target_arn = module.params.get('notification_target_arn') - notification_meta_data = module.params.get('notification_meta_data') - heartbeat_timeout = module.params.get('heartbeat_timeout') - default_result = module.params.get('default_result') +def create_lifecycle_hook(connection, module): + lch_name = module.params.get("lifecycle_hook_name") + asg_name = module.params.get("autoscaling_group_name") + transition = module.params.get("transition") + role_arn = module.params.get("role_arn") + notification_target_arn = module.params.get("notification_target_arn") + notification_meta_data = module.params.get("notification_meta_data") + heartbeat_timeout = module.params.get("heartbeat_timeout") + default_result = module.params.get("default_result") return_object = {} - return_object['changed'] = False + return_object["changed"] = False lch_params = { - 'LifecycleHookName': lch_name, - 'AutoScalingGroupName': asg_name, - 'LifecycleTransition': transition + "LifecycleHookName": lch_name, + "AutoScalingGroupName": asg_name, + "LifecycleTransition": transition, } if role_arn: - lch_params['RoleARN'] = role_arn + lch_params["RoleARN"] = role_arn if notification_target_arn: - lch_params['NotificationTargetARN'] = notification_target_arn + lch_params["NotificationTargetARN"] = notification_target_arn if notification_meta_data: - lch_params['NotificationMetadata'] = notification_meta_data + lch_params["NotificationMetadata"] = notification_meta_data if heartbeat_timeout: - lch_params['HeartbeatTimeout'] = 
heartbeat_timeout + lch_params["HeartbeatTimeout"] = heartbeat_timeout if default_result: - lch_params['DefaultResult'] = default_result + lch_params["DefaultResult"] = default_result try: existing_hook = connection.describe_lifecycle_hooks( AutoScalingGroupName=asg_name, - LifecycleHookNames=[lch_name] - )['LifecycleHooks'] + LifecycleHookNames=[lch_name], + )["LifecycleHooks"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get Lifecycle Hook") if not existing_hook: try: if module.check_mode: - module.exit_json(changed=True, msg="Would have created AutoScalingGroup Lifecycle Hook if not in check_mode.") - return_object['changed'] = True + module.exit_json( + changed=True, msg="Would have created AutoScalingGroup Lifecycle Hook if not in check_mode." + ) + return_object["changed"] = True connection.put_lifecycle_hook(**lch_params) - return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks'] + return_object["lifecycle_hook_info"] = connection.describe_lifecycle_hooks( + AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name] + )["LifecycleHooks"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create LifecycleHook") @@ -201,11 +201,14 @@ def create_lifecycle_hook(connection, module): if modified: try: if module.check_mode: - module.exit_json(changed=True, msg="Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode.") - return_object['changed'] = True + module.exit_json( + changed=True, msg="Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode." + ) + return_object["changed"] = True connection.put_lifecycle_hook(**lch_params) - return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks'] + return_object["lifecycle_hook_info"] = connection.describe_lifecycle_hooks( + AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name] + )["LifecycleHooks"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create LifecycleHook") @@ -229,33 +232,37 @@ def dict_compare(d1, d2): def delete_lifecycle_hook(connection, module): - - lch_name = module.params.get('lifecycle_hook_name') - asg_name = module.params.get('autoscaling_group_name') + lch_name = module.params.get("lifecycle_hook_name") + asg_name = module.params.get("autoscaling_group_name") return_object = {} - return_object['changed'] = False + return_object["changed"] = False try: all_hooks = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name + AutoScalingGroupName=asg_name, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get Lifecycle Hooks") - for hook in all_hooks['LifecycleHooks']: - if hook['LifecycleHookName'] == lch_name: + for hook in all_hooks["LifecycleHooks"]: + if hook["LifecycleHookName"] == lch_name: lch_params = { - 'LifecycleHookName': lch_name, - 'AutoScalingGroupName': asg_name + "LifecycleHookName": lch_name, + "AutoScalingGroupName": asg_name, } try: if module.check_mode: - module.exit_json(changed=True, msg="Would have deleted AutoScalingGroup Lifecycle Hook if not in check_mode.") + module.exit_json( + changed=True, msg="Would have deleted AutoScalingGroup Lifecycle Hook if not in 
check_mode." + ) connection.delete_lifecycle_hook(**lch_params) - return_object['changed'] = True - return_object['lifecycle_hook_removed'] = {'LifecycleHookName': lch_name, 'AutoScalingGroupName': asg_name} + return_object["changed"] = True + return_object["lifecycle_hook_removed"] = { + "LifecycleHookName": lch_name, + "AutoScalingGroupName": asg_name, + } except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to delete LifecycleHook") else: @@ -266,34 +273,36 @@ def delete_lifecycle_hook(connection, module): def main(): argument_spec = dict( - autoscaling_group_name=dict(required=True, type='str'), - lifecycle_hook_name=dict(required=True, type='str'), - transition=dict(type='str', choices=['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING']), - role_arn=dict(type='str'), - notification_target_arn=dict(type='str'), - notification_meta_data=dict(type='str'), - heartbeat_timeout=dict(type='int'), - default_result=dict(default='ABANDON', choices=['ABANDON', 'CONTINUE']), - state=dict(default='present', choices=['present', 'absent']) + autoscaling_group_name=dict(required=True, type="str"), + lifecycle_hook_name=dict(required=True, type="str"), + transition=dict( + type="str", choices=["autoscaling:EC2_INSTANCE_TERMINATING", "autoscaling:EC2_INSTANCE_LAUNCHING"] + ), + role_arn=dict(type="str"), + notification_target_arn=dict(type="str"), + notification_meta_data=dict(type="str"), + heartbeat_timeout=dict(type="int"), + default_result=dict(default="ABANDON", choices=["ABANDON", "CONTINUE"]), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - required_if=[['state', 'present', ['transition']]], + required_if=[["state", "present", ["transition"]]], ) - state = module.params.get('state') + state = module.params.get("state") - connection = module.client('autoscaling') + connection = module.client("autoscaling") changed = False - if state == 'present': + if state == "present": create_lifecycle_hook(connection, module) - elif state == 'absent': + elif state == "absent": delete_lifecycle_hook(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_policy.py b/ansible_collections/community/aws/plugins/modules/autoscaling_policy.py index a29389b0e..6d69d8492 100644 --- a/ansible_collections/community/aws/plugins/modules/autoscaling_policy.py +++ b/ansible_collections/community/aws/plugins/modules/autoscaling_policy.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: autoscaling_policy short_description: Create or delete AWS scaling policies for Autoscaling groups version_added: 1.0.0 @@ -189,11 +187,12 @@ options: description: - The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. 
extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' -EXAMPLES = ''' +""" + +EXAMPLES = r""" - name: Simple Scale Down policy community.aws.autoscaling_policy: state: present @@ -224,7 +223,7 @@ EXAMPLES = ''' asg_name: "application-asg" - name: create TargetTracking predefined policy - ec2_scaling_policy: + community.aws.autoscaling_policy: name: "predefined-policy-1" policy_type: TargetTrackingScaling target_tracking_config: @@ -235,7 +234,7 @@ EXAMPLES = ''' register: result - name: create TargetTracking predefined policy with resource_label - ec2_scaling_policy: + community.aws.autoscaling_policy: name: "predefined-policy-1" policy_type: TargetTrackingScaling target_tracking_config: @@ -247,7 +246,7 @@ EXAMPLES = ''' register: result - name: create TargetTrackingScaling custom policy - ec2_scaling_policy: + community.aws.autoscaling_policy: name: "custom-policy-1" policy_type: TargetTrackingScaling target_tracking_config: @@ -261,9 +260,9 @@ EXAMPLES = ''' target_value: 98.0 asg_name: asg-test-1 register: result -''' +""" -RETURN = ''' +RETURN = r""" adjustment_type: description: Scaling policy adjustment type. returned: always @@ -349,137 +348,146 @@ step_adjustments: returned: always type: int sample: 50 -''' +""" try: import botocore except ImportError: pass # caught by imported AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -def build_target_specification(target_tracking_config): +def build_target_specification(target_tracking_config): # Initialize an empty dict() for building TargetTrackingConfiguration policies, # which will be returned targetTrackingConfig = dict() - if target_tracking_config.get('target_value'): - targetTrackingConfig['TargetValue'] = target_tracking_config['target_value'] + if target_tracking_config.get("target_value"): + targetTrackingConfig["TargetValue"] = target_tracking_config["target_value"] - if target_tracking_config.get('disable_scalein'): - targetTrackingConfig['DisableScaleIn'] = target_tracking_config['disable_scalein'] + if target_tracking_config.get("disable_scalein"): + targetTrackingConfig["DisableScaleIn"] = target_tracking_config["disable_scalein"] else: # Accounting for boto3 response - targetTrackingConfig['DisableScaleIn'] = False + targetTrackingConfig["DisableScaleIn"] = False - if target_tracking_config['predefined_metric_spec'] is not None: + if target_tracking_config["predefined_metric_spec"] is not None: # Build spec for predefined_metric_spec - targetTrackingConfig['PredefinedMetricSpecification'] = dict() - if target_tracking_config['predefined_metric_spec'].get('predefined_metric_type'): - targetTrackingConfig['PredefinedMetricSpecification']['PredefinedMetricType'] = \ - target_tracking_config['predefined_metric_spec']['predefined_metric_type'] - - if target_tracking_config['predefined_metric_spec'].get('resource_label'): - targetTrackingConfig['PredefinedMetricSpecification']['ResourceLabel'] = \ - target_tracking_config['predefined_metric_spec']['resource_label'] - - elif 
target_tracking_config['customized_metric_spec'] is not None: + targetTrackingConfig["PredefinedMetricSpecification"] = dict() + if target_tracking_config["predefined_metric_spec"].get("predefined_metric_type"): + targetTrackingConfig["PredefinedMetricSpecification"]["PredefinedMetricType"] = target_tracking_config[ + "predefined_metric_spec" + ]["predefined_metric_type"] + + if target_tracking_config["predefined_metric_spec"].get("resource_label"): + targetTrackingConfig["PredefinedMetricSpecification"]["ResourceLabel"] = target_tracking_config[ + "predefined_metric_spec" + ]["resource_label"] + + elif target_tracking_config["customized_metric_spec"] is not None: # Build spec for customized_metric_spec - targetTrackingConfig['CustomizedMetricSpecification'] = dict() - if target_tracking_config['customized_metric_spec'].get('metric_name'): - targetTrackingConfig['CustomizedMetricSpecification']['MetricName'] = \ - target_tracking_config['customized_metric_spec']['metric_name'] - - if target_tracking_config['customized_metric_spec'].get('namespace'): - targetTrackingConfig['CustomizedMetricSpecification']['Namespace'] = \ - target_tracking_config['customized_metric_spec']['namespace'] - - if target_tracking_config['customized_metric_spec'].get('dimensions'): - targetTrackingConfig['CustomizedMetricSpecification']['Dimensions'] = \ - target_tracking_config['customized_metric_spec']['dimensions'] - - if target_tracking_config['customized_metric_spec'].get('statistic'): - targetTrackingConfig['CustomizedMetricSpecification']['Statistic'] = \ - target_tracking_config['customized_metric_spec']['statistic'] - - if target_tracking_config['customized_metric_spec'].get('unit'): - targetTrackingConfig['CustomizedMetricSpecification']['Unit'] = \ - target_tracking_config['customized_metric_spec']['unit'] + targetTrackingConfig["CustomizedMetricSpecification"] = dict() + if target_tracking_config["customized_metric_spec"].get("metric_name"): + targetTrackingConfig["CustomizedMetricSpecification"]["MetricName"] = target_tracking_config[ + "customized_metric_spec" + ]["metric_name"] + + if target_tracking_config["customized_metric_spec"].get("namespace"): + targetTrackingConfig["CustomizedMetricSpecification"]["Namespace"] = target_tracking_config[ + "customized_metric_spec" + ]["namespace"] + + if target_tracking_config["customized_metric_spec"].get("dimensions"): + targetTrackingConfig["CustomizedMetricSpecification"]["Dimensions"] = target_tracking_config[ + "customized_metric_spec" + ]["dimensions"] + + if target_tracking_config["customized_metric_spec"].get("statistic"): + targetTrackingConfig["CustomizedMetricSpecification"]["Statistic"] = target_tracking_config[ + "customized_metric_spec" + ]["statistic"] + + if target_tracking_config["customized_metric_spec"].get("unit"): + targetTrackingConfig["CustomizedMetricSpecification"]["Unit"] = target_tracking_config[ + "customized_metric_spec" + ]["unit"] return targetTrackingConfig def create_scaling_policy(connection, module): changed = False - asg_name = module.params['asg_name'] - policy_type = module.params['policy_type'] - policy_name = module.params['name'] - - if policy_type == 'TargetTrackingScaling': - params = dict(PolicyName=policy_name, - PolicyType=policy_type, - AutoScalingGroupName=asg_name) + asg_name = module.params["asg_name"] + policy_type = module.params["policy_type"] + policy_name = module.params["name"] + + if policy_type == "TargetTrackingScaling": + params = dict(PolicyName=policy_name, PolicyType=policy_type, 
AutoScalingGroupName=asg_name) else: - params = dict(PolicyName=policy_name, - PolicyType=policy_type, - AutoScalingGroupName=asg_name, - AdjustmentType=module.params['adjustment_type']) + params = dict( + PolicyName=policy_name, + PolicyType=policy_type, + AutoScalingGroupName=asg_name, + AdjustmentType=module.params["adjustment_type"], + ) # min_adjustment_step attribute is only relevant if the adjustment_type # is set to percentage change in capacity, so it is a special case - if module.params['adjustment_type'] == 'PercentChangeInCapacity': - if module.params['min_adjustment_step']: - params['MinAdjustmentMagnitude'] = module.params['min_adjustment_step'] + if module.params["adjustment_type"] == "PercentChangeInCapacity": + if module.params["min_adjustment_step"]: + params["MinAdjustmentMagnitude"] = module.params["min_adjustment_step"] - if policy_type == 'SimpleScaling': + if policy_type == "SimpleScaling": # can't use required_if because it doesn't allow multiple criteria - # it's only required if policy is SimpleScaling and state is present - if not module.params['scaling_adjustment']: - module.fail_json(msg='scaling_adjustment is required when policy_type is SimpleScaling ' - 'and state is present') - params['ScalingAdjustment'] = module.params['scaling_adjustment'] - if module.params['cooldown']: - params['Cooldown'] = module.params['cooldown'] - - elif policy_type == 'StepScaling': - if not module.params['step_adjustments']: - module.fail_json(msg='step_adjustments is required when policy_type is StepScaling' - 'and state is present') - params['StepAdjustments'] = [] - for step_adjustment in module.params['step_adjustments']: - step_adjust_params = dict( - ScalingAdjustment=step_adjustment['scaling_adjustment']) - if step_adjustment.get('lower_bound'): - step_adjust_params['MetricIntervalLowerBound'] = step_adjustment['lower_bound'] - if step_adjustment.get('upper_bound'): - step_adjust_params['MetricIntervalUpperBound'] = step_adjustment['upper_bound'] - params['StepAdjustments'].append(step_adjust_params) - if module.params['metric_aggregation']: - params['MetricAggregationType'] = module.params['metric_aggregation'] - if module.params['estimated_instance_warmup']: - params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup'] - - elif policy_type == 'TargetTrackingScaling': - if not module.params['target_tracking_config']: - module.fail_json(msg='target_tracking_config is required when policy_type is ' - 'TargetTrackingScaling and state is present') + if not module.params["scaling_adjustment"]: + module.fail_json( + msg="scaling_adjustment is required when policy_type is SimpleScaling and state is present" + ) + params["ScalingAdjustment"] = module.params["scaling_adjustment"] + if module.params["cooldown"]: + params["Cooldown"] = module.params["cooldown"] + + elif policy_type == "StepScaling": + if not module.params["step_adjustments"]: + module.fail_json(msg="step_adjustments is required when policy_type is StepScaling and state is present") + params["StepAdjustments"] = [] + for step_adjustment in module.params["step_adjustments"]: + step_adjust_params = dict(ScalingAdjustment=step_adjustment["scaling_adjustment"]) + if step_adjustment.get("lower_bound"): + step_adjust_params["MetricIntervalLowerBound"] = step_adjustment["lower_bound"] + if step_adjustment.get("upper_bound"): + step_adjust_params["MetricIntervalUpperBound"] = step_adjustment["upper_bound"] + params["StepAdjustments"].append(step_adjust_params) + if module.params["metric_aggregation"]: + 
params["MetricAggregationType"] = module.params["metric_aggregation"] + if module.params["estimated_instance_warmup"]: + params["EstimatedInstanceWarmup"] = module.params["estimated_instance_warmup"] + + elif policy_type == "TargetTrackingScaling": + if not module.params["target_tracking_config"]: + module.fail_json( + msg="target_tracking_config is required when policy_type is TargetTrackingScaling and state is present" + ) else: - params['TargetTrackingConfiguration'] = build_target_specification(module.params.get('target_tracking_config')) - if module.params['estimated_instance_warmup']: - params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup'] + params["TargetTrackingConfiguration"] = build_target_specification( + module.params.get("target_tracking_config") + ) + if module.params["estimated_instance_warmup"]: + params["EstimatedInstanceWarmup"] = module.params["estimated_instance_warmup"] # Ensure idempotency with policies try: - policies = connection.describe_policies(aws_retry=True, - AutoScalingGroupName=asg_name, - PolicyNames=[policy_name])['ScalingPolicies'] + policies = connection.describe_policies( + aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] + )["ScalingPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}") before = after = {} if not policies: @@ -499,41 +507,39 @@ def create_scaling_policy(connection, module): module.fail_json_aws(e, msg="Failed to create autoscaling policy") try: - policies = connection.describe_policies(aws_retry=True, - AutoScalingGroupName=asg_name, - PolicyNames=[policy_name])['ScalingPolicies'] + policies = connection.describe_policies( + aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] + )["ScalingPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}") policy = camel_dict_to_snake_dict(policies[0]) # Backward compatible return values - policy['arn'] = policy['policy_arn'] - policy['as_name'] = policy['auto_scaling_group_name'] - policy['name'] = policy['policy_name'] + policy["arn"] = policy["policy_arn"] + policy["as_name"] = policy["auto_scaling_group_name"] + policy["name"] = policy["policy_name"] if before and after: - module.exit_json(changed=changed, diff=dict( - before=before, after=after), **policy) + module.exit_json(changed=changed, diff=dict(before=before, after=after), **policy) else: module.exit_json(changed=changed, **policy) def delete_scaling_policy(connection, module): - policy_name = module.params.get('name') + policy_name = module.params.get("name") try: - policy = connection.describe_policies( - aws_retry=True, PolicyNames=[policy_name]) + policy = connection.describe_policies(aws_retry=True, PolicyNames=[policy_name]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}") - if policy['ScalingPolicies']: + if policy["ScalingPolicies"]: try: - connection.delete_policy(aws_retry=True, - 
AutoScalingGroupName=policy['ScalingPolicies'][0]['AutoScalingGroupName'], - PolicyName=policy_name) + connection.delete_policy( + aws_retry=True, + AutoScalingGroupName=policy["ScalingPolicies"][0]["AutoScalingGroupName"], + PolicyName=policy_name, + ) module.exit_json(changed=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to delete autoscaling policy") @@ -543,65 +549,62 @@ def delete_scaling_policy(connection, module): def main(): step_adjustment_spec = dict( - lower_bound=dict(type='int'), - upper_bound=dict(type='int'), - scaling_adjustment=dict(type='int', required=True) + lower_bound=dict(type="int"), upper_bound=dict(type="int"), scaling_adjustment=dict(type="int", required=True) ) predefined_metric_spec = dict( - predefined_metric_type=dict(type='str', choices=['ASGAverageCPUUtilization', - 'ASGAverageNetworkIn', - 'ASGAverageNetworkOut', - 'ALBRequestCountPerTarget'], required=True), - resource_label=dict(type='str') + predefined_metric_type=dict( + type="str", + choices=[ + "ASGAverageCPUUtilization", + "ASGAverageNetworkIn", + "ASGAverageNetworkOut", + "ALBRequestCountPerTarget", + ], + required=True, + ), + resource_label=dict(type="str"), ) customized_metric_spec = dict( - metric_name=dict(type='str', required=True), - namespace=dict(type='str', required=True), - statistic=dict(type='str', required=True, choices=['Average', 'Minimum', 'Maximum', 'SampleCount', 'Sum']), - dimensions=dict(type='list', elements='dict'), - unit=dict(type='str') + metric_name=dict(type="str", required=True), + namespace=dict(type="str", required=True), + statistic=dict(type="str", required=True, choices=["Average", "Minimum", "Maximum", "SampleCount", "Sum"]), + dimensions=dict(type="list", elements="dict"), + unit=dict(type="str"), ) target_tracking_spec = dict( - disable_scalein=dict(type='bool'), - target_value=dict(type='float', required=True), - predefined_metric_spec=dict(type='dict', - options=predefined_metric_spec), - customized_metric_spec=dict(type='dict', - options=customized_metric_spec) + disable_scalein=dict(type="bool"), + target_value=dict(type="float", required=True), + predefined_metric_spec=dict(type="dict", options=predefined_metric_spec), + customized_metric_spec=dict(type="dict", options=customized_metric_spec), ) argument_spec = dict( name=dict(required=True), - adjustment_type=dict(choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']), + adjustment_type=dict(choices=["ChangeInCapacity", "ExactCapacity", "PercentChangeInCapacity"]), asg_name=dict(), - scaling_adjustment=dict(type='int'), - min_adjustment_step=dict(type='int'), - cooldown=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - metric_aggregation=dict(default='Average', choices=[ - 'Minimum', 'Maximum', 'Average']), - policy_type=dict(default='SimpleScaling', choices=[ - 'SimpleScaling', 'StepScaling', 'TargetTrackingScaling']), - target_tracking_config=dict(type='dict', options=target_tracking_spec), - step_adjustments=dict( - type='list', options=step_adjustment_spec, elements='dict'), - estimated_instance_warmup=dict(type='int') + scaling_adjustment=dict(type="int"), + min_adjustment_step=dict(type="int"), + cooldown=dict(type="int"), + state=dict(default="present", choices=["present", "absent"]), + metric_aggregation=dict(default="Average", choices=["Minimum", "Maximum", "Average"]), + policy_type=dict(default="SimpleScaling", choices=["SimpleScaling", "StepScaling", 
"TargetTrackingScaling"]), + target_tracking_config=dict(type="dict", options=target_tracking_spec), + step_adjustments=dict(type="list", options=step_adjustment_spec, elements="dict"), + estimated_instance_warmup=dict(type="int"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['asg_name']]]) + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[["state", "present", ["asg_name"]]]) - connection = module.client( - 'autoscaling', retry_decorator=AWSRetry.jittered_backoff()) - state = module.params.get('state') + connection = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff()) + state = module.params.get("state") - if state == 'present': + if state == "present": create_scaling_policy(connection, module) - elif state == 'absent': + elif state == "absent": delete_scaling_policy(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py b/ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py index f1433c522..9bfb70b83 100644 --- a/ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py +++ b/ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2021, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -6,10 +7,7 @@ # Based off of https://github.com/mmochan/ansible-aws-ec2-asg-scheduled-actions/blob/master/library/ec2_asg_scheduled_action.py # (c) 2016, Mike Mochan <@mmochan> -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: autoscaling_scheduled_action version_added: 2.2.0 @@ -67,14 +65,15 @@ options: required: false default: present choices: ['present', 'absent'] -author: Mark Woolley(@marknet15) +author: + - Mark Woolley(@marknet15) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Create a scheduled action for a autoscaling group. - name: Create a minimal scheduled action for autoscaling group community.aws.autoscaling_scheduled_action: @@ -108,9 +107,9 @@ EXAMPLES = r''' autoscaling_group_name: test_asg scheduled_action_name: test_scheduled_action state: absent -''' +""" -RETURN = r''' +RETURN = r""" scheduled_action_name: description: The name of the scheduled action. 
returned: when I(state=present) @@ -151,7 +150,7 @@ desired_capacity: returned: when I(state=present) type: int sample: 1 -''' +""" try: import botocore @@ -160,39 +159,41 @@ except ImportError: try: from dateutil.parser import parse as timedate_parse + HAS_DATEUTIL = True except ImportError: HAS_DATEUTIL = False -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def format_request(): params = dict( - AutoScalingGroupName=module.params.get('autoscaling_group_name'), - ScheduledActionName=module.params.get('scheduled_action_name'), - Recurrence=module.params.get('recurrence') + AutoScalingGroupName=module.params.get("autoscaling_group_name"), + ScheduledActionName=module.params.get("scheduled_action_name"), + Recurrence=module.params.get("recurrence"), ) # Some of these params are optional - if module.params.get('desired_capacity') is not None: - params['DesiredCapacity'] = module.params.get('desired_capacity') + if module.params.get("desired_capacity") is not None: + params["DesiredCapacity"] = module.params.get("desired_capacity") - if module.params.get('min_size') is not None: - params['MinSize'] = module.params.get('min_size') + if module.params.get("min_size") is not None: + params["MinSize"] = module.params.get("min_size") - if module.params.get('max_size') is not None: - params['MaxSize'] = module.params.get('max_size') + if module.params.get("max_size") is not None: + params["MaxSize"] = module.params.get("max_size") - if module.params.get('time_zone') is not None: - params['TimeZone'] = module.params.get('time_zone') + if module.params.get("time_zone") is not None: + params["TimeZone"] = module.params.get("time_zone") - if module.params.get('start_time') is not None: - params['StartTime'] = module.params.get('start_time') + if module.params.get("start_time") is not None: + params["StartTime"] = module.params.get("start_time") - if module.params.get('end_time') is not None: - params['EndTime'] = module.params.get('end_time') + if module.params.get("end_time") is not None: + params["EndTime"] = module.params.get("end_time") return params @@ -205,8 +206,8 @@ def delete_scheduled_action(current_actions): return True params = dict( - AutoScalingGroupName=module.params.get('autoscaling_group_name'), - ScheduledActionName=module.params.get('scheduled_action_name') + AutoScalingGroupName=module.params.get("autoscaling_group_name"), + ScheduledActionName=module.params.get("scheduled_action_name"), ) try: @@ -219,8 +220,8 @@ def delete_scheduled_action(current_actions): def get_scheduled_actions(): params = dict( - AutoScalingGroupName=module.params.get('autoscaling_group_name'), - ScheduledActionNames=[module.params.get('scheduled_action_name')] + AutoScalingGroupName=module.params.get("autoscaling_group_name"), + ScheduledActionNames=[module.params.get("scheduled_action_name")], ) try: @@ -270,55 +271,53 @@ def main(): global client argument_spec = dict( - autoscaling_group_name=dict(required=True, type='str'), - scheduled_action_name=dict(required=True, type='str'), - start_time=dict(default=None, type='str'), - end_time=dict(default=None, type='str'), - time_zone=dict(default=None, type='str'), - recurrence=dict(type='str'), - min_size=dict(default=None, type='int'), - 
max_size=dict(default=None, type='int'), - desired_capacity=dict(default=None, type='int'), - state=dict(default='present', choices=['present', 'absent']) + autoscaling_group_name=dict(required=True, type="str"), + scheduled_action_name=dict(required=True, type="str"), + start_time=dict(default=None, type="str"), + end_time=dict(default=None, type="str"), + time_zone=dict(default=None, type="str"), + recurrence=dict(type="str"), + min_size=dict(default=None, type="int"), + max_size=dict(default=None, type="int"), + desired_capacity=dict(default=None, type="int"), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[['state', 'present', ['recurrence']]], - supports_check_mode=True + argument_spec=argument_spec, required_if=[["state", "present", ["recurrence"]]], supports_check_mode=True ) if not HAS_DATEUTIL: - module.fail_json(msg='dateutil is required for this module') + module.fail_json(msg="dateutil is required for this module") if not module.botocore_at_least("1.20.24"): - module.fail_json(msg='botocore version >= 1.20.24 is required for this module') + module.fail_json(msg="botocore version >= 1.20.24 is required for this module") - client = module.client('autoscaling', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff()) current_actions = get_scheduled_actions() - state = module.params.get('state') + state = module.params.get("state") results = dict() - if state == 'present': + if state == "present": changed = put_scheduled_update_group_action(current_actions) if not module.check_mode: updated_action = get_scheduled_actions()[0] results = dict( - scheduled_action_name=updated_action.get('ScheduledActionName'), - start_time=updated_action.get('StartTime'), - end_time=updated_action.get('EndTime'), - time_zone=updated_action.get('TimeZone'), - recurrence=updated_action.get('Recurrence'), - min_size=updated_action.get('MinSize'), - max_size=updated_action.get('MaxSize'), - desired_capacity=updated_action.get('DesiredCapacity') + scheduled_action_name=updated_action.get("ScheduledActionName"), + start_time=updated_action.get("StartTime"), + end_time=updated_action.get("EndTime"), + time_zone=updated_action.get("TimeZone"), + recurrence=updated_action.get("Recurrence"), + min_size=updated_action.get("MinSize"), + max_size=updated_action.get("MaxSize"), + desired_capacity=updated_action.get("DesiredCapacity"), ) else: changed = delete_scheduled_action(current_actions) - results['changed'] = changed + results["changed"] = changed module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/aws_region_info.py b/ansible_collections/community/aws/plugins/modules/aws_region_info.py deleted file mode 100644 index 126455a8c..000000000 --- a/ansible_collections/community/aws/plugins/modules/aws_region_info.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' -module: aws_region_info -short_description: Gather information about AWS regions -version_added: 1.0.0 -description: - - Gather information about AWS regions. 
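A sketch exercising the TimeZone field that format_request() above forwards, with the action name an assumed value:

- name: Recurring scale-in pinned to a named time zone
  community.aws.autoscaling_scheduled_action:
    autoscaling_group_name: test_asg
    scheduled_action_name: nightly-scale-in   # assumed action name
    recurrence: "0 19 * * *"
    time_zone: "Europe/London"
    min_size: 0
    max_size: 0
    desired_capacity: 0
    state: present

As with the other optional fields, time_zone is only added to the put_scheduled_update_group_action request when it is set.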
-author: - - 'Henrique Rodrigues (@Sodki)' -options: - filters: - description: - - A dict of filters to apply. - - Each dict item consists of a filter key and a filter value. - - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for possible filters. - - Filter names and values are case sensitive. - - You can use underscores instead of dashes (-) in the filter keys. - - Filter keys with underscores will take precedence in case of conflict. - default: {} - type: dict -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 -''' - -EXAMPLES = ''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Gather information about all regions -- community.aws.aws_region_info: - -# Gather information about a single region -- community.aws.aws_region_info: - filters: - region-name: eu-west-1 -''' - -RETURN = ''' -regions: - returned: on success - description: > - Regions that match the provided filters. Each element consists of a dict with all the information related - to that region. - type: list - sample: "[{ - 'endpoint': 'ec2.us-west-1.amazonaws.com', - 'region_name': 'us-west-1' - }]" -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - -try: - from botocore.exceptions import ClientError, BotoCoreError -except ImportError: - pass # Handled by AnsibleAWSModule - - -def main(): - argument_spec = dict( - filters=dict(default={}, type='dict') - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) - - # Replace filter key underscores with dashes, for compatibility - sanitized_filters = dict(module.params.get('filters')) - for k in module.params.get('filters').keys(): - if "_" in k: - sanitized_filters[k.replace('_', '-')] = sanitized_filters[k] - del sanitized_filters[k] - - try: - regions = connection.describe_regions( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) - ) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to describe regions.") - - module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions['Regions']]) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/aws/plugins/modules/batch_compute_environment.py b/ansible_collections/community/aws/plugins/modules/batch_compute_environment.py index 555cfccbe..e9a17f9a0 100644 --- a/ansible_collections/community/aws/plugins/modules/batch_compute_environment.py +++ b/ansible_collections/community/aws/plugins/modules/batch_compute_environment.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: batch_compute_environment version_added: 1.0.0 @@ -120,12 +118,12 @@ options: - The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. 
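The aws_region_info module removed above documented that underscores may replace dashes in filter keys, and its main() rewrote the keys accordingly before calling describe_regions, so the two invocations below were equivalent:

- community.aws.aws_region_info:
    filters:
      region-name: eu-west-1

- community.aws.aws_region_info:
    filters:
      region_name: eu-west-1

Of the two spellings, the underscore form took precedence on conflict, per the option description.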
type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: My Batch Compute Environment community.aws.batch_compute_environment: compute_environment_name: computeEnvironmentName @@ -155,9 +153,9 @@ EXAMPLES = r''' - name: show results ansible.builtin.debug: var: aws_batch_compute_environment_action -''' +""" -RETURN = r''' +RETURN = r""" --- output: description: "returns what action was taken, whether something was changed, invocation and response" @@ -167,15 +165,15 @@ output: changed: false invocation: module_args: - aws_access_key: ~ - aws_secret_key: ~ + access_key: ~ + secret_key: ~ bid_percentage: ~ compute_environment_name: <name> compute_environment_state: ENABLED compute_resource_type: EC2 desiredv_cpus: 0 ec2_key_pair: ~ - ec2_url: ~ + endpoint_url: ~ image_id: ~ instance_role: "arn:aws:iam::..." instance_types: @@ -222,17 +220,22 @@ output: statusReason: "ComputeEnvironment Healthy" type: MANAGED type: dict -''' +""" import re -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule # --------------------------------------------------------------------------------------------------- # @@ -240,6 +243,7 @@ except ImportError: # # --------------------------------------------------------------------------------------------------- + def set_api_params(module, module_params): """ Sets module parameters to those expected by the boto3 API. @@ -260,18 +264,16 @@ def validate_params(module): :return: """ - compute_environment_name = module.params['compute_environment_name'] + compute_environment_name = module.params["compute_environment_name"] # validate compute environment name - if not re.search(r'^[\w\_:]+$', compute_environment_name): + if not re.search(r"^[\w\_:]+$", compute_environment_name): module.fail_json( - msg="Function compute_environment_name {0} is invalid. Names must contain only alphanumeric characters " - "and underscores.".format(compute_environment_name) + msg=f"Function compute_environment_name {compute_environment_name} is invalid. Names must contain only alphanumeric characters and underscores." 
) - if not compute_environment_name.startswith('arn:aws:batch:'): + if not validate_aws_arn(compute_environment_name, service="batch"): if len(compute_environment_name) > 128: - module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit' - .format(compute_environment_name)) + module.fail_json(msg=f'compute_environment_name "{compute_environment_name}" exceeds 128 character limit') return @@ -282,13 +284,14 @@ def validate_params(module): # # --------------------------------------------------------------------------------------------------- + def get_current_compute_environment(module, client): try: environments = client.describe_compute_environments( - computeEnvironments=[module.params['compute_environment_name']] + computeEnvironments=[module.params["compute_environment_name"]] ) - if len(environments['computeEnvironments']) > 0: - return environments['computeEnvironments'][0] + if len(environments["computeEnvironments"]) > 0: + return environments["computeEnvironments"][0] else: return None except ClientError: @@ -297,42 +300,52 @@ def get_current_compute_environment(module, client): def create_compute_environment(module, client): """ - Adds a Batch compute environment + Adds a Batch compute environment - :param module: - :param client: - :return: - """ + :param module: + :param client: + :return: + """ changed = False # set API parameters - params = ( - 'compute_environment_name', 'type', 'service_role') + params = ("compute_environment_name", "type", "service_role") api_params = set_api_params(module, params) - if module.params['compute_environment_state'] is not None: - api_params['state'] = module.params['compute_environment_state'] - - compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets', - 'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage', - 'spot_iam_fleet_role') + if module.params["compute_environment_state"] is not None: + api_params["state"] = module.params["compute_environment_state"] + + compute_resources_param_list = ( + "minv_cpus", + "maxv_cpus", + "desiredv_cpus", + "instance_types", + "image_id", + "subnets", + "security_group_ids", + "ec2_key_pair", + "instance_role", + "tags", + "bid_percentage", + "spot_iam_fleet_role", + ) compute_resources_params = set_api_params(module, compute_resources_param_list) - if module.params['compute_resource_type'] is not None: - compute_resources_params['type'] = module.params['compute_resource_type'] + if module.params["compute_resource_type"] is not None: + compute_resources_params["type"] = module.params["compute_resource_type"] # if module.params['minv_cpus'] is not None: # compute_resources_params['minvCpus'] = module.params['minv_cpus'] - api_params['computeResources'] = compute_resources_params + api_params["computeResources"] = compute_resources_params try: if not module.check_mode: client.create_compute_environment(**api_params) changed = True except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg='Error creating compute environment') + module.fail_json_aws(e, msg="Error creating compute environment") return changed @@ -349,29 +362,29 @@ def remove_compute_environment(module, client): changed = False # set API parameters - api_params = {'computeEnvironment': module.params['compute_environment_name']} + api_params = {"computeEnvironment": module.params["compute_environment_name"]} try: if not module.check_mode: client.delete_compute_environment(**api_params) changed = True except (ClientError, 
BotoCoreError) as e: - module.fail_json_aws(e, msg='Error removing compute environment') + module.fail_json_aws(e, msg="Error removing compute environment") return changed def manage_state(module, client): changed = False - current_state = 'absent' - state = module.params['state'] - compute_environment_state = module.params['compute_environment_state'] - compute_environment_name = module.params['compute_environment_name'] - service_role = module.params['service_role'] - minv_cpus = module.params['minv_cpus'] - maxv_cpus = module.params['maxv_cpus'] - desiredv_cpus = module.params['desiredv_cpus'] - action_taken = 'none' - update_env_response = '' + current_state = "absent" + state = module.params["state"] + compute_environment_state = module.params["compute_environment_state"] + compute_environment_name = module.params["compute_environment_name"] + service_role = module.params["service_role"] + minv_cpus = module.params["minv_cpus"] + maxv_cpus = module.params["maxv_cpus"] + desiredv_cpus = module.params["desiredv_cpus"] + action_taken = "none" + update_env_response = "" check_mode = module.check_mode @@ -379,37 +392,40 @@ def manage_state(module, client): current_compute_environment = get_current_compute_environment(module, client) response = current_compute_environment if current_compute_environment: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present': + if state == "present": + if current_state == "present": updates = False # Update Batch Compute Environment configuration - compute_kwargs = {'computeEnvironment': compute_environment_name} + compute_kwargs = {"computeEnvironment": compute_environment_name} # Update configuration if needed compute_resources = {} - if compute_environment_state and current_compute_environment['state'] != compute_environment_state: - compute_kwargs.update({'state': compute_environment_state}) + if compute_environment_state and current_compute_environment["state"] != compute_environment_state: + compute_kwargs.update({"state": compute_environment_state}) updates = True - if service_role and current_compute_environment['serviceRole'] != service_role: - compute_kwargs.update({'serviceRole': service_role}) + if service_role and current_compute_environment["serviceRole"] != service_role: + compute_kwargs.update({"serviceRole": service_role}) updates = True - if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus: - compute_resources['minvCpus'] = minv_cpus - if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus: - compute_resources['maxvCpus'] = maxv_cpus - if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus: - compute_resources['desiredvCpus'] = desiredv_cpus + if minv_cpus is not None and current_compute_environment["computeResources"]["minvCpus"] != minv_cpus: + compute_resources["minvCpus"] = minv_cpus + if maxv_cpus is not None and current_compute_environment["computeResources"]["maxvCpus"] != maxv_cpus: + compute_resources["maxvCpus"] = maxv_cpus + if ( + desiredv_cpus is not None + and current_compute_environment["computeResources"]["desiredvCpus"] != desiredv_cpus + ): + compute_resources["desiredvCpus"] = desiredv_cpus if len(compute_resources) > 0: - compute_kwargs['computeResources'] = compute_resources + compute_kwargs["computeResources"] = compute_resources updates = True if updates: try: if not check_mode: update_env_response = 
client.update_compute_environment(**compute_kwargs) if not update_env_response: - module.fail_json(msg='Unable to get compute environment information after creating') + module.fail_json(msg="Unable to get compute environment information after creating") changed = True action_taken = "updated" except (BotoCoreError, ClientError) as e: @@ -419,15 +435,15 @@ def manage_state(module, client): # Create Batch Compute Environment changed = create_compute_environment(module, client) # Describe compute environment - action_taken = 'added' + action_taken = "added" response = get_current_compute_environment(module, client) if not response: - module.fail_json(msg='Unable to get compute environment information after creating') + module.fail_json(msg="Unable to get compute environment information after creating") else: - if current_state == 'present': + if current_state == "present": # remove the compute environment changed = remove_compute_environment(module, client) - action_taken = 'deleted' + action_taken = "deleted" return dict(changed=changed, batch_compute_environment_action=action_taken, response=response) @@ -437,6 +453,7 @@ def manage_state(module, client): # # --------------------------------------------------------------------------------------------------- + def main(): """ Main entry point. @@ -445,39 +462,36 @@ def main(): """ argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), compute_environment_name=dict(required=True), - type=dict(required=True, choices=['MANAGED', 'UNMANAGED']), - compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']), + type=dict(required=True, choices=["MANAGED", "UNMANAGED"]), + compute_environment_state=dict(required=False, default="ENABLED", choices=["ENABLED", "DISABLED"]), service_role=dict(required=True), - compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']), - minv_cpus=dict(type='int', required=True), - maxv_cpus=dict(type='int', required=True), - desiredv_cpus=dict(type='int'), - instance_types=dict(type='list', required=True, elements='str'), + compute_resource_type=dict(required=True, choices=["EC2", "SPOT"]), + minv_cpus=dict(type="int", required=True), + maxv_cpus=dict(type="int", required=True), + desiredv_cpus=dict(type="int"), + instance_types=dict(type="list", required=True, elements="str"), image_id=dict(), - subnets=dict(type='list', required=True, elements='str'), - security_group_ids=dict(type='list', required=True, elements='str'), + subnets=dict(type="list", required=True, elements="str"), + security_group_ids=dict(type="list", required=True, elements="str"), ec2_key_pair=dict(no_log=False), instance_role=dict(required=True), - tags=dict(type='dict'), - bid_percentage=dict(type='int'), + tags=dict(type="dict"), + bid_percentage=dict(type="int"), spot_iam_fleet_role=dict(), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('batch') + client = module.client("batch") validate_params(module) results = manage_state(module, client) - module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags'])) + module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=["Tags"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/batch_job_definition.py 
b/ansible_collections/community/aws/plugins/modules/batch_job_definition.py index 79ace0534..fb2b1996d 100644 --- a/ansible_collections/community/aws/plugins/modules/batch_job_definition.py +++ b/ansible_collections/community/aws/plugins/modules/batch_job_definition.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: batch_job_definition version_added: 1.0.0 @@ -179,12 +177,12 @@ options: many times. type: int extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" --- - name: My Batch Job Definition community.aws.batch_job_definition: @@ -207,9 +205,9 @@ EXAMPLES = r''' - name: show results ansible.builtin.debug: var=job_definition_create_result -''' +""" -RETURN = r''' +RETURN = r""" --- output: description: "returns what action was taken, whether something was changed, invocation and response" @@ -223,17 +221,20 @@ output: status: INACTIVE type: container type: dict -''' - -from ansible_collections.amazon.aws.plugins.module_utils.batch import cc, set_api_params -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +""" try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.batch import cc +from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule # --------------------------------------------------------------------------------------------------- # @@ -263,15 +264,15 @@ def validate_params(module, batch_client): # # --------------------------------------------------------------------------------------------------- + def get_current_job_definition(module, batch_client): try: - environments = batch_client.describe_job_definitions( - jobDefinitionName=module.params['job_definition_name'] - ) - if len(environments['jobDefinitions']) > 0: - latest_revision = max(map(lambda d: d['revision'], environments['jobDefinitions'])) - latest_definition = next((x for x in environments['jobDefinitions'] if x['revision'] == latest_revision), - None) + environments = batch_client.describe_job_definitions(jobDefinitionName=module.params["job_definition_name"]) + if len(environments["jobDefinitions"]) > 0: + latest_revision = max(map(lambda d: d["revision"], environments["jobDefinitions"])) + latest_definition = next( + (x for x in environments["jobDefinitions"] if x["revision"] == latest_revision), None + ) return latest_definition return None except ClientError: @@ -280,12 +281,12 @@ def get_current_job_definition(module, batch_client): def create_job_definition(module, batch_client): """ - Adds a Batch job definition + Adds a Batch job definition - :param module: - :param batch_client: - 
:return: - """ + :param module: + :param batch_client: + :return: + """ changed = False @@ -294,36 +295,48 @@ def create_job_definition(module, batch_client): container_properties_params = set_api_params(module, get_container_property_params()) retry_strategy_params = set_api_params(module, get_retry_strategy_params()) - api_params['retryStrategy'] = retry_strategy_params - api_params['containerProperties'] = container_properties_params + api_params["retryStrategy"] = retry_strategy_params + api_params["containerProperties"] = container_properties_params try: if not module.check_mode: batch_client.register_job_definition(**api_params) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error registering job definition') + module.fail_json_aws(e, msg="Error registering job definition") return changed def get_retry_strategy_params(): - return ('attempts',) + return ("attempts",) def get_container_property_params(): - return ('image', 'vcpus', 'memory', 'command', 'job_role_arn', 'volumes', 'environment', 'mount_points', - 'readonly_root_filesystem', 'privileged', 'ulimits', 'user') + return ( + "image", + "vcpus", + "memory", + "command", + "job_role_arn", + "volumes", + "environment", + "mount_points", + "readonly_root_filesystem", + "privileged", + "ulimits", + "user", + ) def get_base_params(): - return 'job_definition_name', 'type', 'parameters' + return "job_definition_name", "type", "parameters" def get_compute_environment_order_list(module): compute_environment_order_list = [] - for ceo in module.params['compute_environment_order']: - compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment'])) + for ceo in module.params["compute_environment_order"]: + compute_environment_order_list.append(dict(order=ceo["order"], computeEnvironment=ceo["compute_environment"])) return compute_environment_order_list @@ -340,10 +353,10 @@ def remove_job_definition(module, batch_client): try: if not module.check_mode: - batch_client.deregister_job_definition(jobDefinition=module.params['job_definition_arn']) + batch_client.deregister_job_definition(jobDefinition=module.params["job_definition_arn"]) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error removing job definition') + module.fail_json_aws(e, msg="Error removing job definition") return changed @@ -356,12 +369,12 @@ def job_definition_equal(module, current_definition): break for param in get_container_property_params(): - if module.params.get(param) != current_definition.get('containerProperties').get(cc(param)): + if module.params.get(param) != current_definition.get("containerProperties").get(cc(param)): equal = False break for param in get_retry_strategy_params(): - if module.params.get(param) != current_definition.get('retryStrategy').get(cc(param)): + if module.params.get(param) != current_definition.get("retryStrategy").get(cc(param)): equal = False break @@ -370,10 +383,10 @@ def job_definition_equal(module, current_definition): def manage_state(module, batch_client): changed = False - current_state = 'absent' - state = module.params['state'] - job_definition_name = module.params['job_definition_name'] - action_taken = 'none' + current_state = "absent" + state = module.params["state"] + job_definition_name = module.params["job_definition_name"] + action_taken = "none" response = None check_mode = module.check_mode @@ -381,28 +394,28 @@ def manage_state(module, batch_client): # check if the job definition exists 
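The revision model is explicit in manage_state here: Batch job definitions are never edited in place, so any parameter drift registers a new revision ("updated with new version"), and get_current_job_definition always compares against the highest revision. Re-running a task such as this sketch with a changed memory value illustrates that behaviour (name and image are assumed):

- name: Register, or revise, a Batch job definition
  community.aws.batch_job_definition:
    job_definition_name: my-job          # assumed name
    type: container
    image: busybox                       # assumed image
    vcpus: 1
    memory: 256                          # changing this registers a new revision
    command:
      - sleep
      - "60"
    state: present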
current_job_definition = get_current_job_definition(module, batch_client) if current_job_definition: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present': + if state == "present": + if current_state == "present": # check if definition has changed and register a new version if necessary if not job_definition_equal(module, current_job_definition): create_job_definition(module, batch_client) - action_taken = 'updated with new version' + action_taken = "updated with new version" changed = True else: # Create Job definition changed = create_job_definition(module, batch_client) - action_taken = 'added' + action_taken = "added" response = get_current_job_definition(module, batch_client) if not response: - module.fail_json(msg='Unable to get job definition information after creating/updating') + module.fail_json(msg="Unable to get job definition information after creating/updating") else: - if current_state == 'present': + if current_state == "present": # remove the Job definition changed = remove_job_definition(module, batch_client) - action_taken = 'deregistered' + action_taken = "deregistered" return dict(changed=changed, batch_job_definition_action=action_taken, response=response) @@ -412,6 +425,7 @@ def manage_state(module, batch_client): # # --------------------------------------------------------------------------------------------------- + def main(): """ Main entry point. @@ -420,32 +434,29 @@ def main(): """ argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), + state=dict(required=False, default="present", choices=["present", "absent"]), job_definition_name=dict(required=True), job_definition_arn=dict(), type=dict(required=True), - parameters=dict(type='dict'), + parameters=dict(type="dict"), image=dict(required=True), - vcpus=dict(type='int', required=True), - memory=dict(type='int', required=True), - command=dict(type='list', default=[], elements='str'), + vcpus=dict(type="int", required=True), + memory=dict(type="int", required=True), + command=dict(type="list", default=[], elements="str"), job_role_arn=dict(), - volumes=dict(type='list', default=[], elements='dict'), - environment=dict(type='list', default=[], elements='dict'), - mount_points=dict(type='list', default=[], elements='dict'), + volumes=dict(type="list", default=[], elements="dict"), + environment=dict(type="list", default=[], elements="dict"), + mount_points=dict(type="list", default=[], elements="dict"), readonly_root_filesystem=dict(), privileged=dict(), - ulimits=dict(type='list', default=[], elements='dict'), + ulimits=dict(type="list", default=[], elements="dict"), user=dict(), - attempts=dict(type='int') + attempts=dict(type="int"), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - batch_client = module.client('batch') + batch_client = module.client("batch") validate_params(module, batch_client) @@ -454,5 +465,5 @@ def main(): module.exit_json(**camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/batch_job_queue.py b/ansible_collections/community/aws/plugins/modules/batch_job_queue.py index ef48896a4..4be42cbc5 100644 --- a/ansible_collections/community/aws/plugins/modules/batch_job_queue.py +++ b/ansible_collections/community/aws/plugins/modules/batch_job_queue.py @@ -1,12 
+1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: batch_job_queue version_added: 1.0.0 @@ -63,12 +61,12 @@ options: type: str description: The name of the compute environment. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: My Batch Job Queue community.aws.batch_job_queue: job_queue_name: jobQueueName @@ -77,18 +75,18 @@ EXAMPLES = ''' job_queue_state: ENABLED priority: 1 compute_environment_order: - - order: 1 - compute_environment: my_compute_env1 - - order: 2 - compute_environment: my_compute_env2 + - order: 1 + compute_environment: my_compute_env1 + - order: 2 + compute_environment: my_compute_env2 register: batch_job_queue_action - name: show results ansible.builtin.debug: var: batch_job_queue_action -''' +""" -RETURN = r''' +RETURN = r""" --- output: description: "returns what action was taken, whether something was changed, invocation and response" @@ -104,17 +102,20 @@ output: status: UPDATING status_reason: "JobQueue Healthy" type: dict -''' - -from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + # --------------------------------------------------------------------------------------------------- # # Helper Functions & classes @@ -137,50 +138,49 @@ def validate_params(module): # # --------------------------------------------------------------------------------------------------- + def get_current_job_queue(module, client): try: - environments = client.describe_job_queues( - jobQueues=[module.params['job_queue_name']] - ) - return environments['jobQueues'][0] if len(environments['jobQueues']) > 0 else None + environments = client.describe_job_queues(jobQueues=[module.params["job_queue_name"]]) + return environments["jobQueues"][0] if len(environments["jobQueues"]) > 0 else None except ClientError: return None def create_job_queue(module, client): """ - Adds a Batch job queue + Adds a Batch job queue - :param module: - :param client: - :return: - """ + :param module: + :param client: + :return: + """ changed = False # set API parameters - params = ('job_queue_name', 'priority') + params = ("job_queue_name", "priority") api_params = set_api_params(module, params) - if module.params['job_queue_state'] is not None: - api_params['state'] = module.params['job_queue_state'] + if module.params["job_queue_state"] is not None: + api_params["state"] = module.params["job_queue_state"] - api_params['computeEnvironmentOrder'] = 
get_compute_environment_order_list(module) + api_params["computeEnvironmentOrder"] = get_compute_environment_order_list(module) try: if not module.check_mode: client.create_job_queue(**api_params) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error creating compute environment') + module.fail_json_aws(e, msg="Error creating compute environment") return changed def get_compute_environment_order_list(module): compute_environment_order_list = [] - for ceo in module.params['compute_environment_order']: - compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment'])) + for ceo in module.params["compute_environment_order"]: + compute_environment_order_list.append(dict(order=ceo["order"], computeEnvironment=ceo["compute_environment"])) return compute_environment_order_list @@ -196,25 +196,25 @@ def remove_job_queue(module, client): changed = False # set API parameters - api_params = {'jobQueue': module.params['job_queue_name']} + api_params = {"jobQueue": module.params["job_queue_name"]} try: if not module.check_mode: client.delete_job_queue(**api_params) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error removing job queue') + module.fail_json_aws(e, msg="Error removing job queue") return changed def manage_state(module, client): changed = False - current_state = 'absent' - state = module.params['state'] - job_queue_state = module.params['job_queue_state'] - job_queue_name = module.params['job_queue_name'] - priority = module.params['priority'] - action_taken = 'none' + current_state = "absent" + state = module.params["state"] + job_queue_state = module.params["job_queue_state"] + job_queue_name = module.params["job_queue_name"] + priority = module.params["priority"] + action_taken = "none" response = None check_mode = module.check_mode @@ -222,25 +222,25 @@ def manage_state(module, client): # check if the job queue exists current_job_queue = get_current_job_queue(module, client) if current_job_queue: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present': + if state == "present": + if current_state == "present": updates = False # Update Batch Job Queue configuration - job_kwargs = {'jobQueue': job_queue_name} + job_kwargs = {"jobQueue": job_queue_name} # Update configuration if needed - if job_queue_state and current_job_queue['state'] != job_queue_state: - job_kwargs.update({'state': job_queue_state}) + if job_queue_state and current_job_queue["state"] != job_queue_state: + job_kwargs.update({"state": job_queue_state}) updates = True - if priority is not None and current_job_queue['priority'] != priority: - job_kwargs.update({'priority': priority}) + if priority is not None and current_job_queue["priority"] != priority: + job_kwargs.update({"priority": priority}) updates = True new_compute_environment_order_list = get_compute_environment_order_list(module) - if new_compute_environment_order_list != current_job_queue['computeEnvironmentOrder']: - job_kwargs['computeEnvironmentOrder'] = new_compute_environment_order_list + if new_compute_environment_order_list != current_job_queue["computeEnvironmentOrder"]: + job_kwargs["computeEnvironmentOrder"] = new_compute_environment_order_list updates = True if updates: @@ -255,17 +255,17 @@ def manage_state(module, client): else: # Create Job Queue changed = create_job_queue(module, client) - action_taken = 'added' + action_taken = "added" # Describe job queue 
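# A note on the update path above: manage_state() follows a read-compare-update
# cycle. It describes the live queue, collects only the fields that drifted into
# an update_job_queue() payload, and skips the API call when nothing changed.
# A standalone sketch of that comparison, keyed on the camelCase names that
# describe_job_queues() returns (build_update_kwargs is an illustrative name,
# not part of the module):

def build_update_kwargs(current, job_queue_state, priority, compute_environment_order):
    """Return update_job_queue kwargs for drifted fields, or None if in sync."""
    kwargs = {"jobQueue": current["jobQueueName"]}
    dirty = False
    if job_queue_state and current["state"] != job_queue_state:
        kwargs["state"] = job_queue_state
        dirty = True
    if priority is not None and current["priority"] != priority:
        kwargs["priority"] = priority
        dirty = True
    if compute_environment_order != current["computeEnvironmentOrder"]:
        kwargs["computeEnvironmentOrder"] = compute_environment_order
        dirty = True
    return kwargs if dirty else None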
response = get_current_job_queue(module, client) if not response: - module.fail_json(msg='Unable to get job queue information after creating/updating') + module.fail_json(msg="Unable to get job queue information after creating/updating") else: - if current_state == 'present': + if current_state == "present": # remove the Job Queue changed = remove_job_queue(module, client) - action_taken = 'deleted' + action_taken = "deleted" return dict(changed=changed, batch_job_queue_action=action_taken, response=response) @@ -275,6 +275,7 @@ def manage_state(module, client): # # --------------------------------------------------------------------------------------------------- + def main(): """ Main entry point. @@ -283,19 +284,16 @@ def main(): """ argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), + state=dict(required=False, default="present", choices=["present", "absent"]), job_queue_name=dict(required=True), - job_queue_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']), - priority=dict(type='int', required=True), - compute_environment_order=dict(type='list', required=True, elements='dict'), + job_queue_state=dict(required=False, default="ENABLED", choices=["ENABLED", "DISABLED"]), + priority=dict(type="int", required=True), + compute_environment_order=dict(type="list", required=True, elements="dict"), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('batch') + client = module.client("batch") validate_params(module) @@ -304,5 +302,5 @@ def main(): module.exit_json(**camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py b/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py index f7e71e2f8..ff32b2124 100644 --- a/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py +++ b/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: cloudformation_exports_info short_description: Read a value from CloudFormation Exports version_added: 1.0.0 @@ -15,63 +13,60 @@ description: author: - "Michael Moyle (@mmoyle)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. - name: Get Exports - community.aws.cloudformation_exports_info: - profile: 'my_aws_profile' - region: 'my_region' + community.aws.cloudformation_exports_info: {} register: cf_exports - ansible.builtin.debug: msg: "{{ cf_exports }}" -''' +""" -RETURN = ''' +RETURN = r""" export_items: description: A dictionary of Exports items names and values. 
returned: Always type: dict -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +""" try: - from botocore.exceptions import ClientError from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # handled by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + @AWSRetry.exponential_backoff() def list_exports(cloudformation_client): - '''Get Exports Names and Values and return in dictionary ''' - list_exports_paginator = cloudformation_client.get_paginator('list_exports') - exports = list_exports_paginator.paginate().build_full_result()['Exports'] + """Get Exports Names and Values and return in dictionary""" + list_exports_paginator = cloudformation_client.get_paginator("list_exports") + exports = list_exports_paginator.paginate().build_full_result()["Exports"] export_items = dict() for item in exports: - export_items[item['Name']] = item['Value'] + export_items[item["Name"]] = item["Value"] return export_items def main(): argument_spec = dict() - result = dict( - changed=False, - original_message='' - ) + result = dict(changed=False, original_message="") module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - cloudformation_client = module.client('cloudformation') + cloudformation_client = module.client("cloudformation") try: - result['export_items'] = list_exports(cloudformation_client) + result["export_items"] = list_exports(cloudformation_client) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e) @@ -80,5 +75,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py b/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py index c6771db5e..ebb9403e8 100644 --- a/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py +++ b/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py @@ -1,20 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: cloudformation_stack_set version_added: 1.0.0 short_description: Manage groups of CloudFormation stacks description: - - Launches/updates/deletes AWS CloudFormation Stack Sets. + - Launches/updates/deletes AWS CloudFormation Stack Sets. notes: - - To make an individual stack, you want the M(amazon.aws.cloudformation) module. + - To make an individual stack, you want the M(amazon.aws.cloudformation) module. options: name: description: @@ -169,14 +167,15 @@ options: - Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual count may be lower. 
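# The concurrency and failure-tolerance options documented above map one-to-one
# onto the OperationPreferences structure that CloudFormation's stack set APIs
# accept; the module's get_operation_preferences() helper further down this diff
# performs exactly this translation. A condensed sketch of the mapping
# (to_operation_preferences is an illustrative name):

def to_operation_preferences(failure_tolerance, regions=None):
    """Map module-style options onto a CloudFormation OperationPreferences dict."""
    api_names = {
        "fail_count": "FailureToleranceCount",
        "fail_percentage": "FailureTolerancePercentage",
        "parallel_count": "MaxConcurrentCount",
        "parallel_percentage": "MaxConcurrentPercentage",
    }
    prefs = {}
    if regions:
        prefs["RegionOrder"] = list(regions)
    for option, api_name in api_names.items():
        # Truthiness test mirrors the module helper (an explicit 0 is skipped).
        if failure_tolerance.get(option):
            prefs[api_name] = failure_tolerance[option]
    return prefs

# to_operation_preferences({"fail_count": 2, "parallel_percentage": 50})
# -> {"FailureToleranceCount": 2, "MaxConcurrentPercentage": 50}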
-author: "Ryan Scott Brown (@ryansb)" +author: + - "Ryan Scott Brown (@ryansb)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a stack set with instances in two accounts community.aws.cloudformation_stack_set: name: my-stack @@ -202,7 +201,7 @@ EXAMPLES = r''' - 123456789012 - 234567890123 regions: - - us-east-1 + - us-east-1 - name: The same type of update, but wait for the update to complete in all stacks community.aws.cloudformation_stack_set: @@ -218,7 +217,7 @@ EXAMPLES = r''' - 123456789012 - 234567890123 regions: - - us-east-1 + - us-east-1 - name: Register new accounts (create new stack instances) with an existing stack set. community.aws.cloudformation_stack_set: @@ -235,10 +234,10 @@ EXAMPLES = r''' - 234567890123 - 345678901234 regions: - - us-east-1 -''' + - us-east-1 +""" -RETURN = r''' +RETURN = r""" operations_log: type: list description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases. @@ -316,8 +315,7 @@ stack_set: other: Type: "AWS::SNS::Topic" Properties: {} - -''' # NOQA +""" import datetime import itertools @@ -325,7 +323,8 @@ import time import uuid try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: # handled by AnsibleAWSModule pass @@ -333,19 +332,20 @@ except ImportError: from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def create_stack_set(module, stack_params, cfn): try: cfn.create_stack_set(aws_retry=True, **stack_params) - return await_stack_set_exists(cfn, stack_params['StackSetName']) + return await_stack_set_exists(cfn, stack_params["StackSetName"]) except (ClientError, BotoCoreError) as err: - module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get('StackSetName'))) + module.fail_json_aws(err, msg=f"Failed to create stack set {stack_params.get('StackSetName')}.") def update_stack_set(module, stack_params, cfn): @@ -354,22 +354,34 @@ def update_stack_set(module, stack_params, cfn): # don't need to be updated. 
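# update_stack_set() below routes failures through is_boto3_error_code(), which
# lets an `except` clause match a botocore ClientError only when its AWS error
# code equals a given string. Roughly how that mechanism works (is_error_code is
# an illustrative stand-in; the real helper is the one imported above from
# amazon.aws.plugins.module_utils.botocore):

import sys

from botocore.exceptions import ClientError

def is_error_code(code):
    """Return ClientError if the in-flight exception carries `code`, else a
    class that is never raised, so the surrounding `except` clause skips it."""
    exception = sys.exc_info()[1]
    codes = code if isinstance(code, list) else [code]
    if isinstance(exception, ClientError) and exception.response["Error"]["Code"] in codes:
        return ClientError
    return type("NeverRaisedException", (Exception,), {})

# This works because Python evaluates the `except` expression at match time:
#     except is_error_code("StackSetNotFound") as err:
#         ...  # only ClientErrors whose Code is StackSetNotFound land here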
try: cfn.update_stack_set(**stack_params) - except is_boto3_error_code('StackSetNotFound') as err: # pylint: disable=duplicate-except + except is_boto3_error_code("StackSetNotFound") as err: # pylint: disable=duplicate-except module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.") - except is_boto3_error_code('StackInstanceNotFound') as err: # pylint: disable=duplicate-except - module.fail_json_aws(err, msg="One or more stack instances were not found for this stack set. Double check " - "the `accounts` and `regions` parameters.") - except is_boto3_error_code('OperationInProgressException') as err: # pylint: disable=duplicate-except + except is_boto3_error_code("StackInstanceNotFound") as err: # pylint: disable=duplicate-except module.fail_json_aws( - err, msg="Another operation is already in progress on this stack set - please try again later. When making " - "multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op errors.") + err, + msg=( + "One or more stack instances were not found for this stack set. Double check " + "the `accounts` and `regions` parameters." + ), + ) + except is_boto3_error_code("OperationInProgressException") as err: # pylint: disable=duplicate-except + module.fail_json_aws( + err, + msg=( + "Another operation is already in progress on this stack set - please try again later. When making" + " multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op" + " errors." + ), + ) except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except module.fail_json_aws(err, msg="Could not update stack set.") - if module.params.get('wait'): + if module.params.get("wait"): await_stack_set_operation( - module, cfn, operation_id=stack_params['OperationId'], - stack_set_name=stack_params['StackSetName'], - max_wait=module.params.get('wait_timeout'), + module, + cfn, + operation_id=stack_params["OperationId"], + stack_set_name=stack_params["StackSetName"], + max_wait=module.params.get("wait_timeout"), ) return True @@ -379,20 +391,24 @@ def compare_stack_instances(cfn, stack_set_name, accounts, regions): instance_list = cfn.list_stack_instances( aws_retry=True, StackSetName=stack_set_name, - )['Summaries'] + )["Summaries"] desired_stack_instances = set(itertools.product(accounts, regions)) - existing_stack_instances = set((i['Account'], i['Region']) for i in instance_list) + existing_stack_instances = set((i["Account"], i["Region"]) for i in instance_list) # new stacks, existing stacks, unspecified stacks - return (desired_stack_instances - existing_stack_instances), existing_stack_instances, (existing_stack_instances - desired_stack_instances) + return ( + (desired_stack_instances - existing_stack_instances), + existing_stack_instances, + (existing_stack_instances - desired_stack_instances), + ) @AWSRetry.jittered_backoff(retries=3, delay=4) def stack_set_facts(cfn, stack_set_name): try: - ss = cfn.describe_stack_set(StackSetName=stack_set_name)['StackSet'] - ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags']) + ss = cfn.describe_stack_set(StackSetName=stack_set_name)["StackSet"] + ss["Tags"] = boto3_tag_list_to_ansible_dict(ss["Tags"]) return ss - except cfn.exceptions.from_code('StackSetNotFound'): + except cfn.exceptions.from_code("StackSetNotFound"): # Return None if the stack doesn't exist return @@ -403,29 +419,29 @@ def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wai for i in range(max_wait // 15): try: operation = 
cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id) - if operation['StackSetOperation']['Status'] not in ('RUNNING', 'STOPPING'): + if operation["StackSetOperation"]["Status"] not in ("RUNNING", "STOPPING"): # Stack set has completed operation break - except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except + except is_boto3_error_code("StackSetNotFound"): # pylint: disable=duplicate-except pass - except is_boto3_error_code('OperationNotFound'): # pylint: disable=duplicate-except + except is_boto3_error_code("OperationNotFound"): # pylint: disable=duplicate-except pass time.sleep(15) - if operation and operation['StackSetOperation']['Status'] not in ('FAILED', 'STOPPED'): + if operation and operation["StackSetOperation"]["Status"] not in ("FAILED", "STOPPED"): await_stack_instance_completion( - module, cfn, + module, + cfn, stack_set_name=stack_set_name, # subtract however long we waited already max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()), ) - elif operation and operation['StackSetOperation']['Status'] in ('FAILED', 'STOPPED'): + elif operation and operation["StackSetOperation"]["Status"] in ("FAILED", "STOPPED"): pass else: module.warn( - "Timed out waiting for operation {0} on stack set {1} after {2} seconds. Returning unfinished operation".format( - operation_id, stack_set_name, max_wait - ) + f"Timed out waiting for operation {operation_id} on stack set {stack_set_name} after {max_wait} seconds." + " Returning unfinished operation" ) @@ -434,84 +450,83 @@ def await_stack_instance_completion(module, cfn, stack_set_name, max_wait): for i in range(max_wait // 15): try: stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name) - to_await = [inst for inst in stack_instances['Summaries'] - if inst['Status'] != 'CURRENT'] + to_await = [inst for inst in stack_instances["Summaries"] if inst["Status"] != "CURRENT"] if not to_await: - return stack_instances['Summaries'] - except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except + return stack_instances["Summaries"] + except is_boto3_error_code("StackSetNotFound"): # pylint: disable=duplicate-except # this means the deletion beat us, or the stack set is not yet propagated pass time.sleep(15) module.warn( - "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format( - stack_set_name, ', '.join(s['StackId'] for s in to_await), max_wait - ) + f"Timed out waiting for stack set {stack_set_name} instances {', '.join(s['StackId'] for s in to_await)} to" + f" complete after {max_wait} seconds. 
Returning unfinished operation" ) def await_stack_set_exists(cfn, stack_set_name): # AWSRetry will retry on `StackSetNotFound` errors for us - ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)['StackSet'] - ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags']) - return camel_dict_to_snake_dict(ss, ignore_list=('Tags',)) + ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)["StackSet"] + ss["Tags"] = boto3_tag_list_to_ansible_dict(ss["Tags"]) + return camel_dict_to_snake_dict(ss, ignore_list=("Tags",)) def describe_stack_tree(module, stack_set_name, operation_ids=None): - jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5, catch_extra_error_codes=['StackSetNotFound']) - cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator) + jittered_backoff_decorator = AWSRetry.jittered_backoff( + retries=5, delay=3, max_delay=5, catch_extra_error_codes=["StackSetNotFound"] + ) + cfn = module.client("cloudformation", retry_decorator=jittered_backoff_decorator) result = dict() - result['stack_set'] = camel_dict_to_snake_dict( + result["stack_set"] = camel_dict_to_snake_dict( cfn.describe_stack_set( StackSetName=stack_set_name, aws_retry=True, - )['StackSet'] + )["StackSet"] ) - result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags']) - result['operations_log'] = sorted( + result["stack_set"]["tags"] = boto3_tag_list_to_ansible_dict(result["stack_set"]["tags"]) + result["operations_log"] = sorted( camel_dict_to_snake_dict( cfn.list_stack_set_operations( StackSetName=stack_set_name, aws_retry=True, ) - )['summaries'], - key=lambda x: x['creation_timestamp'] + )["summaries"], + key=lambda x: x["creation_timestamp"], ) - result['stack_instances'] = sorted( - [ - camel_dict_to_snake_dict(i) for i in - cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries'] - ], - key=lambda i: i['region'] + i['account'] + result["stack_instances"] = sorted( + [camel_dict_to_snake_dict(i) for i in cfn.list_stack_instances(StackSetName=stack_set_name)["Summaries"]], + key=lambda i: i["region"] + i["account"], ) if operation_ids: - result['operations'] = [] + result["operations"] = [] for op_id in operation_ids: try: - result['operations'].append(camel_dict_to_snake_dict( - cfn.describe_stack_set_operation( - StackSetName=stack_set_name, - OperationId=op_id, - )['StackSetOperation'] - )) - except is_boto3_error_code('OperationNotFoundException'): # pylint: disable=duplicate-except + result["operations"].append( + camel_dict_to_snake_dict( + cfn.describe_stack_set_operation( + StackSetName=stack_set_name, + OperationId=op_id, + )["StackSetOperation"] + ) + ) + except is_boto3_error_code("OperationNotFoundException"): # pylint: disable=duplicate-except pass return result def get_operation_preferences(module): params = dict() - if module.params.get('regions'): - params['RegionOrder'] = list(module.params['regions']) + if module.params.get("regions"): + params["RegionOrder"] = list(module.params["regions"]) for param, api_name in { - 'fail_count': 'FailureToleranceCount', - 'fail_percentage': 'FailureTolerancePercentage', - 'parallel_percentage': 'MaxConcurrentPercentage', - 'parallel_count': 'MaxConcurrentCount', + "fail_count": "FailureToleranceCount", + "fail_percentage": "FailureTolerancePercentage", + "parallel_percentage": "MaxConcurrentPercentage", + "parallel_count": "MaxConcurrentCount", }.items(): - if module.params.get('failure_tolerance', {}).get(param): - params[api_name] 
= module.params.get('failure_tolerance', {}).get(param) + if module.params.get("failure_tolerance", {}).get(param): + params[api_name] = module.params.get("failure_tolerance", {}).get(param) return params @@ -519,148 +534,154 @@ def main(): argument_spec = dict( name=dict(required=True), description=dict(), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=900), - state=dict(default='present', choices=['present', 'absent']), - purge_stacks=dict(type='bool', default=True), - parameters=dict(type='dict', default={}), - template=dict(type='path'), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=900), + state=dict(default="present", choices=["present", "absent"]), + purge_stacks=dict(type="bool", default=True), + parameters=dict(type="dict", default={}), + template=dict(type="path"), template_url=dict(), template_body=dict(), - capabilities=dict(type='list', elements='str', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']), - regions=dict(type='list', elements='str'), - accounts=dict(type='list', elements='str'), + capabilities=dict(type="list", elements="str", choices=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"]), + regions=dict(type="list", elements="str"), + accounts=dict(type="list", elements="str"), failure_tolerance=dict( - type='dict', + type="dict", default={}, options=dict( - fail_count=dict(type='int'), - fail_percentage=dict(type='int'), - parallel_percentage=dict(type='int'), - parallel_count=dict(type='int'), + fail_count=dict(type="int"), + fail_percentage=dict(type="int"), + parallel_percentage=dict(type="int"), + parallel_count=dict(type="int"), ), mutually_exclusive=[ - ['fail_count', 'fail_percentage'], - ['parallel_count', 'parallel_percentage'], + ["fail_count", "fail_percentage"], + ["parallel_count", "parallel_percentage"], ], ), - administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']), - execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']), - tags=dict(type='dict'), + administration_role_arn=dict(aliases=["admin_role_arn", "administration_role", "admin_role"]), + execution_role_name=dict(aliases=["execution_role", "exec_role", "exec_role_name"]), + tags=dict(type="dict"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['template_url', 'template', 'template_body']], - supports_check_mode=True + mutually_exclusive=[["template_url", "template", "template_body"]], + supports_check_mode=True, ) # Wrap the cloudformation client methods that this module uses with # automatic backoff / retry for throttling error codes - jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30, catch_extra_error_codes=['StackSetNotFound']) - cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator) - existing_stack_set = stack_set_facts(cfn, module.params['name']) + jittered_backoff_decorator = AWSRetry.jittered_backoff( + retries=10, delay=3, max_delay=30, catch_extra_error_codes=["StackSetNotFound"] + ) + cfn = module.client("cloudformation", retry_decorator=jittered_backoff_decorator) + existing_stack_set = stack_set_facts(cfn, module.params["name"]) operation_uuid = to_native(uuid.uuid4()) operation_ids = [] # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. 
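# The parameters handling just below accepts either a plain key/value pair or a
# dict form carrying `value` / `use_previous_value`, and flattens both into
# CloudFormation's ParameterKey/ParameterValue records. The same translation as
# a standalone sketch (to_cfn_parameters is an illustrative name; str() stands
# in for Ansible's to_native):

def to_cfn_parameters(parameters):
    """Translate the module's parameters option into boto3 Parameter dicts."""
    result = []
    for key, value in parameters.items():
        if isinstance(value, dict):
            param = {"ParameterKey": key}
            if "value" in value:
                param["ParameterValue"] = str(value["value"])
            if value.get("use_previous_value"):
                # UsePreviousValue and ParameterValue are mutually exclusive.
                param["UsePreviousValue"] = True
                param.pop("ParameterValue", None)
            result.append(param)
        else:
            result.append({"ParameterKey": key, "ParameterValue": str(value)})
    return result

# to_cfn_parameters({"InstanceType": "t3.micro", "AmiId": {"use_previous_value": True}})
# -> [{"ParameterKey": "InstanceType", "ParameterValue": "t3.micro"},
#     {"ParameterKey": "AmiId", "UsePreviousValue": True}]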
stack_params = {} - state = module.params['state'] - if state == 'present' and not module.params['accounts']: + state = module.params["state"] + if state == "present" and not module.params["accounts"]: module.fail_json( - msg="Can't create a stack set without choosing at least one account. " + msg=( + "Can't create a stack set without choosing at least one account. " "To get the ID of the current account, use the aws_caller_info module." + ) ) - module.params['accounts'] = [to_native(a) for a in module.params['accounts']] + module.params["accounts"] = [to_native(a) for a in module.params["accounts"]] - stack_params['StackSetName'] = module.params['name'] - if module.params.get('description'): - stack_params['Description'] = module.params['description'] + stack_params["StackSetName"] = module.params["name"] + if module.params.get("description"): + stack_params["Description"] = module.params["description"] - if module.params.get('capabilities'): - stack_params['Capabilities'] = module.params['capabilities'] + if module.params.get("capabilities"): + stack_params["Capabilities"] = module.params["capabilities"] - if module.params['template'] is not None: - with open(module.params['template'], 'r') as tpl: - stack_params['TemplateBody'] = tpl.read() - elif module.params['template_body'] is not None: - stack_params['TemplateBody'] = module.params['template_body'] - elif module.params['template_url'] is not None: - stack_params['TemplateURL'] = module.params['template_url'] + if module.params["template"] is not None: + with open(module.params["template"], "r") as tpl: + stack_params["TemplateBody"] = tpl.read() + elif module.params["template_body"] is not None: + stack_params["TemplateBody"] = module.params["template_body"] + elif module.params["template_url"] is not None: + stack_params["TemplateURL"] = module.params["template_url"] else: # no template is provided, but if the stack set exists already, we can use the existing one. if existing_stack_set: - stack_params['UsePreviousTemplate'] = True + stack_params["UsePreviousTemplate"] = True else: module.fail_json( - msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, " - "`template_body`, or `template_url`".format(module.params['name']) + msg=( + f"The Stack Set {module.params['name']} does not exist, and no template was provided. 
Provide one" + " of `template`, `template_body`, or `template_url`" + ) ) - stack_params['Parameters'] = [] - for k, v in module.params['parameters'].items(): + stack_params["Parameters"] = [] + for k, v in module.params["parameters"].items(): if isinstance(v, dict): # set parameter based on a dict to allow additional CFN Parameter Attributes param = dict(ParameterKey=k) - if 'value' in v: - param['ParameterValue'] = to_native(v['value']) + if "value" in v: + param["ParameterValue"] = to_native(v["value"]) - if 'use_previous_value' in v and bool(v['use_previous_value']): - param['UsePreviousValue'] = True - param.pop('ParameterValue', None) + if "use_previous_value" in v and bool(v["use_previous_value"]): + param["UsePreviousValue"] = True + param.pop("ParameterValue", None) - stack_params['Parameters'].append(param) + stack_params["Parameters"].append(param) else: # allow default k/v configuration to set a template parameter - stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)}) + stack_params["Parameters"].append({"ParameterKey": k, "ParameterValue": str(v)}) - if module.params.get('tags') and isinstance(module.params.get('tags'), dict): - stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags']) + if module.params.get("tags") and isinstance(module.params.get("tags"), dict): + stack_params["Tags"] = ansible_dict_to_boto3_tag_list(module.params["tags"]) - if module.params.get('administration_role_arn'): + if module.params.get("administration_role_arn"): # TODO loosen the semantics here to autodetect the account ID and build the ARN - stack_params['AdministrationRoleARN'] = module.params['administration_role_arn'] - if module.params.get('execution_role_name'): - stack_params['ExecutionRoleName'] = module.params['execution_role_name'] + stack_params["AdministrationRoleARN"] = module.params["administration_role_arn"] + if module.params.get("execution_role_name"): + stack_params["ExecutionRoleName"] = module.params["execution_role_name"] result = {} if module.check_mode: - if state == 'absent' and existing_stack_set: - module.exit_json(changed=True, msg='Stack set would be deleted', meta=[]) - elif state == 'absent' and not existing_stack_set: - module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[]) - elif state == 'present' and not existing_stack_set: - module.exit_json(changed=True, msg='New stack set would be created', meta=[]) - elif state == 'present' and existing_stack_set: + if state == "absent" and existing_stack_set: + module.exit_json(changed=True, msg="Stack set would be deleted", meta=[]) + elif state == "absent" and not existing_stack_set: + module.exit_json(changed=False, msg="Stack set doesn't exist", meta=[]) + elif state == "present" and not existing_stack_set: + module.exit_json(changed=True, msg="New stack set would be created", meta=[]) + elif state == "present" and existing_stack_set: new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances( cfn, - module.params['name'], - module.params['accounts'], - module.params['regions'], + module.params["name"], + module.params["accounts"], + module.params["regions"], ) if new_stacks: - module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[]) - elif unspecified_stacks and module.params.get('purge_stack_instances'): - module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[]) + module.exit_json(changed=True, msg="New stack instance(s) would be created", meta=[]) + elif unspecified_stacks and 
module.params.get("purge_stack_instances"): + module.exit_json(changed=True, msg="Old stack instance(s) would be deleted", meta=[]) else: # TODO: need to check the template and other settings for correct check mode - module.exit_json(changed=False, msg='No changes detected', meta=[]) + module.exit_json(changed=False, msg="No changes detected", meta=[]) changed = False - if state == 'present': + if state == "present": if not existing_stack_set: # on create this parameter has a different name, and cannot be referenced later in the job log - stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid) + stack_params["ClientRequestToken"] = f"Ansible-StackSet-Create-{operation_uuid}" changed = True create_stack_set(module, stack_params, cfn) else: - stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid) - operation_ids.append(stack_params['OperationId']) - if module.params.get('regions'): - stack_params['OperationPreferences'] = get_operation_preferences(module) + stack_params["OperationId"] = f"Ansible-StackSet-Update-{operation_uuid}" + operation_ids.append(stack_params["OperationId"]) + if module.params.get("regions"): + stack_params["OperationPreferences"] = get_operation_preferences(module) changed |= update_stack_set(module, stack_params, cfn) await_stack_set_operation( @@ -674,24 +695,24 @@ def main(): # now create/update any appropriate stack instances new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances( cfn, - module.params['name'], - module.params['accounts'], - module.params['regions'], + module.params["name"], + module.params["accounts"], + module.params["regions"], ) if new_stack_instances: - operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid)) + operation_ids.append(f"Ansible-StackInstance-Create-{operation_uuid}") changed = True cfn.create_stack_instances( - StackSetName=module.params['name'], + StackSetName=module.params["name"], Accounts=list(set(acct for acct, region in new_stack_instances)), Regions=list(set(region for acct, region in new_stack_instances)), OperationPreferences=get_operation_preferences(module), OperationId=operation_ids[-1], ) else: - operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid)) + operation_ids.append(f"Ansible-StackInstance-Update-{operation_uuid}") cfn.update_stack_instances( - StackSetName=module.params['name'], + StackSetName=module.params["name"], Accounts=list(set(acct for acct, region in existing_stack_instances)), Regions=list(set(region for acct, region in existing_stack_instances)), OperationPreferences=get_operation_preferences(module), @@ -699,55 +720,67 @@ def main(): ) for op in operation_ids: await_stack_set_operation( - module, cfn, operation_id=op, - stack_set_name=module.params['name'], - max_wait=module.params.get('wait_timeout'), + module, + cfn, + operation_id=op, + stack_set_name=module.params["name"], + max_wait=module.params.get("wait_timeout"), ) - elif state == 'absent': + elif state == "absent": if not existing_stack_set: - module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name'])) - if module.params.get('purge_stack_instances') is False: + module.exit_json(msg=f"Stack set {module.params['name']} does not exist") + if module.params.get("purge_stack_instances") is False: pass try: cfn.delete_stack_set( - StackSetName=module.params['name'], + StackSetName=module.params["name"], + ) + module.exit_json(msg=f"Stack set {module.params['name']} 
deleted") + except is_boto3_error_code("OperationInProgressException") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, msg=f"Cannot delete stack {module.params['name']} while there is an operation in progress" ) - module.exit_json(msg='Stack set {0} deleted'.format(module.params['name'])) - except is_boto3_error_code('OperationInProgressException') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name'])) - except is_boto3_error_code('StackSetNotEmptyException'): # pylint: disable=duplicate-except - delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid) + except is_boto3_error_code("StackSetNotEmptyException"): # pylint: disable=duplicate-except + delete_instances_op = f"Ansible-StackInstance-Delete-{operation_uuid}" cfn.delete_stack_instances( - StackSetName=module.params['name'], - Accounts=module.params['accounts'], - Regions=module.params['regions'], - RetainStacks=(not module.params.get('purge_stacks')), - OperationId=delete_instances_op + StackSetName=module.params["name"], + Accounts=module.params["accounts"], + Regions=module.params["regions"], + RetainStacks=(not module.params.get("purge_stacks")), + OperationId=delete_instances_op, ) await_stack_set_operation( - module, cfn, operation_id=delete_instances_op, - stack_set_name=stack_params['StackSetName'], - max_wait=module.params.get('wait_timeout'), + module, + cfn, + operation_id=delete_instances_op, + stack_set_name=stack_params["StackSetName"], + max_wait=module.params.get("wait_timeout"), ) try: cfn.delete_stack_set( - StackSetName=module.params['name'], + StackSetName=module.params["name"], ) - except is_boto3_error_code('StackSetNotEmptyException') as exc: # pylint: disable=duplicate-except + except is_boto3_error_code("StackSetNotEmptyException") as exc: # pylint: disable=duplicate-except # this time, it is likely that either the delete failed or there are more stacks. 
instances = cfn.list_stack_instances( - StackSetName=module.params['name'], + StackSetName=module.params["name"], + ) + stack_states = ", ".join( + "(account={Account}, region={Region}, state={Status})".format(**i) for i in instances["Summaries"] + ) + module.fail_json_aws( + exc, + msg="Could not purge all stacks, or not all accounts/regions were chosen for deletion: " + + stack_states, ) - stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i) for i in instances['Summaries']) - module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states) - module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name'])) + module.exit_json(changed=True, msg=f"Stack set {module.params['name']} deleted") - result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids)) - if any(o['status'] == 'FAILED' for o in result['operations']): + result.update(**describe_stack_tree(module, stack_params["StackSetName"], operation_ids=operation_ids)) + if any(o["status"] == "FAILED" for o in result["operations"]): module.fail_json(msg="One or more operations failed to execute", **result) module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py index 447fd994e..13718cfb8 100644 --- a/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py +++ b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- version_added: 1.0.0 @@ -21,12 +19,6 @@ author: - Willem van Ketwich (@wilvk) - Will Thames (@willthames) -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - - amazon.aws.tags - options: state: @@ -119,6 +111,17 @@ options: origin_path: description: Tells CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. type: str + origin_shield: + description: Specify origin shield options for the origin. + type: dict + suboptions: + enabled: + description: Indicate whether you want the origin to have Origin Shield enabled or not. + type: bool + origin_shield_region: + description: Specify which AWS region will be used for Origin Shield. Required if Origin Shield is enabled. + type: str + version_added: 6.0.0 custom_headers: description: - Custom headers you wish to add to the request before passing it to the origin. @@ -169,7 +172,18 @@ options: origin_keepalive_timeout: description: A keep-alive timeout (in seconds). type: int - + connection_attempts: + description: The number of times that CloudFront attempts to connect to the origin. + The minimum number is C(1), the maximum is C(3). + type: int + default: 3 + version_added: 6.0.0 + connection_timeout: + description: The number of seconds that CloudFront waits when trying to establish a connection to the origin. + The minimum timeout is C(1) second, the maximum is C(10) seconds. 
+ type: int + default: 10 + version_added: 6.0.0 purge_origins: description: Whether to remove any origins that aren't listed in I(origins). default: false @@ -191,9 +205,25 @@ options: description: - The ID of the header policy that CloudFront adds to responses that it sends to viewers. type: str + cache_policy_id: + version_added: 7.1.0 + description: + - The ID of the cache policy for CloudFront to use for the default cache behavior. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html) + type: str + origin_request_policy_id: + version_added: 7.1.0 + description: + - The ID of the origin request policy for CloudFront to use for the default cache behavior. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html) + type: str forwarded_values: description: - A dict that specifies how CloudFront handles query strings and cookies. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. type: dict suboptions: query_string: @@ -312,9 +342,25 @@ options: description: - The ID of the header policy that CloudFront adds to responses that it sends to viewers. type: str + cache_policy_id: + version_added: 7.1.0 + description: + - The ID of the cache policy for CloudFront to use for the cache behavior. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html) + type: str + origin_request_policy_id: + version_added: 7.1.0 + description: + - The ID of the origin request policy for CloudFront to use for the cache behavior. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html) + type: str forwarded_values: description: - A dict that specifies how CloudFront handles query strings and cookies. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. type: dict suboptions: query_string: @@ -556,7 +602,7 @@ options: description: - The version of the http protocol to use for the distribution. - AWS defaults this to C(http2). - - Valid values are C(http1.1) and C(http2). + - Valid values are C(http1.1), C(http2), C(http3) and C(http2and3). 
type: str ipv6_enabled: @@ -577,9 +623,14 @@ options: default: 1800 type: int -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: create a basic distribution with defaults and tags community.aws.cloudfront_distribution: state: present @@ -606,7 +657,9 @@ EXAMPLES = r''' state: present distribution_id: E1RP5A2MJ8073O comment: modified by cloudfront.py again - aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ] + aliases: + - 'www.my-distribution-source.com' + - 'zzz.aaa.io' - name: update a distribution's aliases and comment using an alias as a reference community.aws.cloudfront_distribution: @@ -633,12 +686,12 @@ EXAMPLES = r''' state: present caller_reference: unique test distribution ID origins: - - id: 'my test origin-000111' - domain_name: www.example.com - origin_path: /production - custom_headers: - - header_name: MyCustomHeaderName - header_value: MyCustomHeaderValue + - id: 'my test origin-000111' + domain_name: www.example.com + origin_path: /production + custom_headers: + - header_name: MyCustomHeaderName + header_value: MyCustomHeaderValue default_cache_behavior: target_origin_id: 'my test origin-000111' forwarded_values: @@ -646,7 +699,7 @@ EXAMPLES = r''' cookies: forward: all headers: - - '*' + - '*' viewer_protocol_policy: allow-all smooth_streaming: true compress: true @@ -669,9 +722,9 @@ EXAMPLES = r''' community.aws.cloudfront_distribution: state: absent caller_reference: replaceable distribution -''' +""" -RETURN = r''' +RETURN = r""" active_trusted_signers: description: Key pair IDs that CloudFront is aware of for each trusted signer. returned: always @@ -1278,6 +1331,32 @@ origins: returned: always type: str sample: '' + connection_attempts: + description: The number of times that CloudFront attempts to connect to the origin. + returned: always + type: int + sample: 3 + connection_timeout: + description: The number of seconds that CloudFront waits when trying to establish a connection to the origin. + returned: always + type: int + sample: 10 + origin_shield: + description: Configuration of the origin Origin Shield. + returned: always + type: complex + contains: + enabled: + description: Whether Origin Shield is enabled or not. + returned: always + type: bool + sample: false + origin_shield_region: + description: Which region is used by Origin Shield. + returned: when enabled is true + type: str + sample: us-east-1 + version_added: 6.0.0 s3_origin_config: description: Origin access identity configuration for S3 Origin. 
returned: when s3_origin_access_identity_enabled is true @@ -1368,29 +1447,31 @@ web_acl_id: returned: always type: str sample: abcd1234-1234-abcd-abcd-abcd12345678 -''' +""" -from ansible.module_utils._text import to_text, to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager -from ansible.module_utils.common.dict_transformations import recursive_diff -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict import datetime - -try: - from collections import OrderedDict -except ImportError: - try: - from ordereddict import OrderedDict - except ImportError: - pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed) +import re +from collections import OrderedDict try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils._text import to_native +from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import recursive_diff +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def change_dict_key_name(dictionary, old_key, new_key): if old_key in dictionary: @@ -1417,43 +1498,44 @@ def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True): if list_items is None: list_items = [] if not isinstance(list_items, list): - raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items))) + raise ValueError(f"Expected a list, got a {type(list_items).__name__} with value {str(list_items)}") result = {} if include_quantity: - result['quantity'] = len(list_items) + result["quantity"] = len(list_items) if len(list_items) > 0: - result['items'] = list_items + result["items"] = list_items return result def create_distribution(client, module, config, tags): try: if not tags: - return client.create_distribution(aws_retry=True, DistributionConfig=config)['Distribution'] + return client.create_distribution(aws_retry=True, DistributionConfig=config)["Distribution"] else: - distribution_config_with_tags = { - 'DistributionConfig': config, - 'Tags': { - 'Items': tags - } - } - return client.create_distribution_with_tags(aws_retry=True, DistributionConfigWithTags=distribution_config_with_tags)['Distribution'] + distribution_config_with_tags = {"DistributionConfig": config, "Tags": {"Items": tags}} + return client.create_distribution_with_tags( + aws_retry=True, DistributionConfigWithTags=distribution_config_with_tags + )["Distribution"] 
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error creating distribution") def delete_distribution(client, module, distribution): try: - return client.delete_distribution(aws_retry=True, Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag']) + return client.delete_distribution( + aws_retry=True, Id=distribution["Distribution"]["Id"], IfMatch=distribution["ETag"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution'])) + module.fail_json_aws(e, msg=f"Error deleting distribution {to_native(distribution['Distribution'])}") def update_distribution(client, module, config, distribution_id, e_tag): try: - return client.update_distribution(aws_retry=True, DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution'] + return client.update_distribution(aws_retry=True, DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)[ + "Distribution" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config)) + module.fail_json_aws(e, msg=f"Error updating distribution to {to_native(config)}") def tag_resource(client, module, arn, tags): @@ -1473,7 +1555,7 @@ def untag_resource(client, module, arn, tag_keys): def list_tags_for_resource(client, module, arn): try: response = client.list_tags_for_resource(aws_retry=True, Resource=arn) - return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items')) + return boto3_tag_list_to_ansible_dict(response.get("Tags").get("Items")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error listing tags for resource") @@ -1505,103 +1587,131 @@ class CloudFrontValidationManager(object): self.__default_https_port = 443 self.__default_ipv6_enabled = False self.__default_origin_ssl_protocols = [ - 'TLSv1', - 'TLSv1.1', - 'TLSv1.2' + "TLSv1", + "TLSv1.1", + "TLSv1.2", ] - self.__default_custom_origin_protocol_policy = 'match-viewer' + self.__default_custom_origin_protocol_policy = "match-viewer" self.__default_custom_origin_read_timeout = 30 self.__default_custom_origin_keepalive_timeout = 5 - self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') + self.__default_datetime_string = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f") self.__default_cache_behavior_min_ttl = 0 self.__default_cache_behavior_max_ttl = 31536000 self.__default_cache_behavior_default_ttl = 86400 self.__default_cache_behavior_compress = False - self.__default_cache_behavior_viewer_protocol_policy = 'allow-all' + self.__default_cache_behavior_viewer_protocol_policy = "allow-all" self.__default_cache_behavior_smooth_streaming = False - self.__default_cache_behavior_forwarded_values_forward_cookies = 'none' + self.__default_cache_behavior_forwarded_values_forward_cookies = "none" self.__default_cache_behavior_forwarded_values_query_string = True self.__default_trusted_signers_enabled = False - self.__valid_price_classes = set([ - 'PriceClass_100', - 'PriceClass_200', - 'PriceClass_All' - ]) - self.__valid_origin_protocol_policies = set([ - 'http-only', - 'match-viewer', - 'https-only' - ]) - self.__valid_origin_ssl_protocols = set([ - 'SSLv3', - 'TLSv1', - 'TLSv1.1', - 'TLSv1.2' - ]) - self.__valid_cookie_forwarding = set([ - 'none', - 'whitelist', - 'all' 
- ]) - self.__valid_viewer_protocol_policies = set([ - 'allow-all', - 'https-only', - 'redirect-to-https' - ]) - self.__valid_methods = set([ - 'GET', - 'HEAD', - 'POST', - 'PUT', - 'PATCH', - 'OPTIONS', - 'DELETE' - ]) + self.__valid_price_classes = set( + [ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All", + ] + ) + self.__valid_origin_protocol_policies = set( + [ + "http-only", + "match-viewer", + "https-only", + ] + ) + self.__valid_origin_ssl_protocols = set( + [ + "SSLv3", + "TLSv1", + "TLSv1.1", + "TLSv1.2", + ] + ) + self.__valid_cookie_forwarding = set( + [ + "none", + "whitelist", + "all", + ] + ) + self.__valid_viewer_protocol_policies = set( + [ + "allow-all", + "https-only", + "redirect-to-https", + ] + ) + self.__valid_methods = set( + [ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE", + ] + ) self.__valid_methods_cached_methods = [ - set([ - 'GET', - 'HEAD' - ]), - set([ - 'GET', - 'HEAD', - 'OPTIONS' - ]) + set( + [ + "GET", + "HEAD", + ] + ), + set( + [ + "GET", + "HEAD", + "OPTIONS", + ] + ), ] self.__valid_methods_allowed_methods = [ self.__valid_methods_cached_methods[0], self.__valid_methods_cached_methods[1], - self.__valid_methods + self.__valid_methods, ] - self.__valid_lambda_function_association_event_types = set([ - 'viewer-request', - 'viewer-response', - 'origin-request', - 'origin-response' - ]) - self.__valid_viewer_certificate_ssl_support_methods = set([ - 'sni-only', - 'vip' - ]) - self.__valid_viewer_certificate_minimum_protocol_versions = set([ - 'SSLv3', - 'TLSv1', - 'TLSv1_2016', - 'TLSv1.1_2016', - 'TLSv1.2_2018', - 'TLSv1.2_2019', - 'TLSv1.2_2021' - ]) - self.__valid_viewer_certificate_certificate_sources = set([ - 'cloudfront', - 'iam', - 'acm' - ]) - self.__valid_http_versions = set([ - 'http1.1', - 'http2' - ]) - self.__s3_bucket_domain_identifier = '.s3.amazonaws.com' + self.__valid_lambda_function_association_event_types = set( + [ + "viewer-request", + "viewer-response", + "origin-request", + "origin-response", + ] + ) + self.__valid_viewer_certificate_ssl_support_methods = set( + [ + "sni-only", + "vip", + ] + ) + self.__valid_viewer_certificate_minimum_protocol_versions = set( + [ + "SSLv3", + "TLSv1", + "TLSv1_2016", + "TLSv1.1_2016", + "TLSv1.2_2018", + "TLSv1.2_2019", + "TLSv1.2_2021", + ] + ) + self.__valid_viewer_certificate_certificate_sources = set( + [ + "cloudfront", + "iam", + "acm", + ] + ) + self.__valid_http_versions = set( + [ + "http1.1", + "http2", + "http3", + "http2and3", + ] + ) + self.__s3_bucket_domain_regex = re.compile(r"\.s3(?:\.[^.]+)?\.amazonaws\.com$") def add_missing_key(self, dict_object, key_to_set, value_to_set): if key_to_set not in dict_object and value_to_set is not None: @@ -1615,7 +1725,9 @@ class CloudFrontValidationManager(object): dict_object = change_dict_key_name(dict_object, old_key, new_key) return dict_object - def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False): + def add_key_else_validate( + self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False + ): if key_name in dict_object: self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values) else: @@ -1630,26 +1742,36 @@ class CloudFrontValidationManager(object): if logging is None: return None valid_logging = {} - if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging): - self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket 
and prefix must be specified.") - valid_logging['include_cookies'] = logging.get('include_cookies') - valid_logging['enabled'] = logging.get('enabled') - valid_logging['bucket'] = logging.get('bucket') - valid_logging['prefix'] = logging.get('prefix') + if logging and not set(["enabled", "include_cookies", "bucket", "prefix"]).issubset(logging): + self.module.fail_json( + msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified." + ) + valid_logging["include_cookies"] = logging.get("include_cookies") + valid_logging["enabled"] = logging.get("enabled") + valid_logging["bucket"] = logging.get("bucket") + valid_logging["prefix"] = logging.get("prefix") return valid_logging except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution logging") def validate_is_list(self, list_to_validate, list_name): if not isinstance(list_to_validate, list): - self.module.fail_json(msg='%s is of type %s. Must be a list.' % (list_name, type(list_to_validate).__name__)) + self.module.fail_json(msg=f"{list_name} is of type {type(list_to_validate).__name__}. Must be a list.") def validate_required_key(self, key_name, full_key_name, dict_object): if key_name not in dict_object: - self.module.fail_json(msg="%s must be specified." % full_key_name) - - def validate_origins(self, client, config, origins, default_origin_domain_name, - default_origin_path, create_distribution, purge_origins=False): + self.module.fail_json(msg=f"{full_key_name} must be specified.") + + def validate_origins( + self, + client, + config, + origins, + default_origin_domain_name, + default_origin_path, + create_distribution, + purge_origins=False, + ): try: if origins is None: if default_origin_domain_name is None and not create_distribution: @@ -1658,23 +1780,24 @@ class CloudFrontValidationManager(object): else: return ansible_list_to_cloudfront_list(config) if default_origin_domain_name is not None: - origins = [{ - 'domain_name': default_origin_domain_name, - 'origin_path': default_origin_path or '' - }] + origins = [{"domain_name": default_origin_domain_name, "origin_path": default_origin_path or ""}] else: origins = [] - self.validate_is_list(origins, 'origins') + self.validate_is_list(origins, "origins") if not origins and default_origin_domain_name is None and create_distribution: - self.module.fail_json(msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one.") + self.module.fail_json( + msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one." 
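# For illustration only: a minimal origins[] entry needs just a domain name,
# e.g. origins=[{"domain_name": "example-bucket.s3.amazonaws.com"}] (a
# hypothetical bucket); validate_origin() below backfills origin_path, id and
# custom_headers with defaults.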
+ ) all_origins = OrderedDict() new_domains = list() for origin in config: - all_origins[origin.get('domain_name')] = origin + all_origins[origin.get("domain_name")] = origin for origin in origins: - origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path) - all_origins[origin['domain_name']] = origin - new_domains.append(origin['domain_name']) + origin = self.validate_origin( + client, all_origins.get(origin.get("domain_name"), {}), origin, default_origin_path + ) + all_origins[origin["domain_name"]] = origin + new_domains.append(origin["domain_name"]) if purge_origins: for domain in list(all_origins.keys()): if domain not in new_domains: @@ -1684,37 +1807,55 @@ class CloudFrontValidationManager(object): self.module.fail_json_aws(e, msg="Error validating distribution origins") def validate_s3_origin_configuration(self, client, existing_config, origin): - if origin.get('s3_origin_config', {}).get('origin_access_identity'): - return origin['s3_origin_config']['origin_access_identity'] + if origin.get("s3_origin_config", {}).get("origin_access_identity"): + return origin["s3_origin_config"]["origin_access_identity"] - if existing_config.get('s3_origin_config', {}).get('origin_access_identity'): - return existing_config['s3_origin_config']['origin_access_identity'] + if existing_config.get("s3_origin_config", {}).get("origin_access_identity"): + return existing_config["s3_origin_config"]["origin_access_identity"] try: - comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string) - caller_reference = "%s-%s" % (origin.get('domain_name'), self.__default_datetime_string) - cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference, - Comment=comment)) - oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id'] + comment = f"access-identity-by-ansible-{origin.get('domain_name')}-{self.__default_datetime_string}" + caller_reference = f"{origin.get('domain_name')}-{self.__default_datetime_string}" + cfoai_config = dict( + CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference, Comment=comment) + ) + oai = client.create_cloud_front_origin_access_identity(**cfoai_config)["CloudFrontOriginAccessIdentity"][ + "Id" + ] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id']) - return "origin-access-identity/cloudfront/%s" % oai + self.module.fail_json_aws(e, msg=f"Couldn't create Origin Access Identity for id {origin['id']}") + return f"origin-access-identity/cloudfront/{oai}" def validate_origin(self, client, existing_config, origin, default_origin_path): try: - origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or '')) - self.validate_required_key('origin_path', 'origins[].origin_path', origin) - origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string)) - if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0: - for custom_header in origin.get('custom_headers'): - if 'header_name' not in custom_header or 'header_value' not in custom_header: - self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.") - origin['custom_headers'] = 
ansible_list_to_cloudfront_list(origin.get('custom_headers')) + origin = self.add_missing_key( + origin, "origin_path", existing_config.get("origin_path", default_origin_path or "") + ) + self.validate_required_key("origin_path", "origins[].origin_path", origin) + origin = self.add_missing_key(origin, "id", existing_config.get("id", self.__default_datetime_string)) + if "custom_headers" in origin and len(origin.get("custom_headers")) > 0: + for custom_header in origin.get("custom_headers"): + if "header_name" not in custom_header or "header_value" not in custom_header: + self.module.fail_json( + msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified." + ) + origin["custom_headers"] = ansible_list_to_cloudfront_list(origin.get("custom_headers")) else: - origin['custom_headers'] = ansible_list_to_cloudfront_list() - if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower(): + origin["custom_headers"] = ansible_list_to_cloudfront_list() + if "origin_shield" in origin: + origin_shield = origin.get("origin_shield") + if origin_shield.get("enabled"): + origin_shield_region = origin_shield.get("origin_shield_region") + if origin_shield_region is None: + self.module.fail_json( + msg="origins[].origin_shield.origin_shield_region must be specified" + " when origins[].origin_shield.enabled is true." + ) + else: + origin_shield_region = origin_shield_region.lower() + if self.__s3_bucket_domain_regex.search(origin.get("domain_name").lower()): if origin.get("s3_origin_access_identity_enabled") is not None: - if origin['s3_origin_access_identity_enabled']: + if origin["s3_origin_access_identity_enabled"]: s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) else: s3_origin_config = None @@ -1728,26 +1869,47 @@ class CloudFrontValidationManager(object): origin["s3_origin_config"] = dict(origin_access_identity=oai) - if 'custom_origin_config' in origin: - self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive") + if "custom_origin_config" in origin: + self.module.fail_json( + msg="s3 origin domains and custom_origin_config are mutually exclusive", + ) else: - origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {})) - custom_origin_config = origin.get('custom_origin_config') - custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy', - 'origins[].custom_origin_config.origin_protocol_policy', - self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies) - custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout) - custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout) - custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port) - custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port) - if custom_origin_config.get('origin_ssl_protocols', {}).get('items'): - custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items'] - if custom_origin_config.get('origin_ssl_protocols'): - self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 
'origins[].origin_ssl_protocols', - self.__valid_origin_ssl_protocols) + origin = self.add_missing_key( + origin, "custom_origin_config", existing_config.get("custom_origin_config", {}) + ) + custom_origin_config = origin.get("custom_origin_config") + custom_origin_config = self.add_key_else_validate( + custom_origin_config, + "origin_protocol_policy", + "origins[].custom_origin_config.origin_protocol_policy", + self.__default_custom_origin_protocol_policy, + self.__valid_origin_protocol_policies, + ) + custom_origin_config = self.add_missing_key( + custom_origin_config, "origin_read_timeout", self.__default_custom_origin_read_timeout + ) + custom_origin_config = self.add_missing_key( + custom_origin_config, "origin_keepalive_timeout", self.__default_custom_origin_keepalive_timeout + ) + custom_origin_config = self.add_key_else_change_dict_key( + custom_origin_config, "http_port", "h_t_t_p_port", self.__default_http_port + ) + custom_origin_config = self.add_key_else_change_dict_key( + custom_origin_config, "https_port", "h_t_t_p_s_port", self.__default_https_port + ) + if custom_origin_config.get("origin_ssl_protocols", {}).get("items"): + custom_origin_config["origin_ssl_protocols"] = custom_origin_config["origin_ssl_protocols"]["items"] + if custom_origin_config.get("origin_ssl_protocols"): + self.validate_attribute_list_with_allowed_list( + custom_origin_config["origin_ssl_protocols"], + "origins[].origin_ssl_protocols", + self.__valid_origin_ssl_protocols, + ) else: - custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols - custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols']) + custom_origin_config["origin_ssl_protocols"] = self.__default_origin_ssl_protocols + custom_origin_config["origin_ssl_protocols"] = ansible_list_to_cloudfront_list( + custom_origin_config["origin_ssl_protocols"] + ) return origin except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error validating distribution origin") @@ -1761,13 +1923,16 @@ class CloudFrontValidationManager(object): # is true (if purge_cache_behaviors is not true, we can't really know the full new order) if not purge_cache_behaviors: for behavior in config: - all_cache_behaviors[behavior['path_pattern']] = behavior + all_cache_behaviors[behavior["path_pattern"]] = behavior for cache_behavior in cache_behaviors: - valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}), - cache_behavior, valid_origins) - all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior + valid_cache_behavior = self.validate_cache_behavior( + all_cache_behaviors.get(cache_behavior.get("path_pattern"), {}), cache_behavior, valid_origins + ) + all_cache_behaviors[cache_behavior["path_pattern"]] = valid_cache_behavior if purge_cache_behaviors: - for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]): + for target_origin_id in set(all_cache_behaviors.keys()) - set( + [cb["path_pattern"] for cb in cache_behaviors] + ): del all_cache_behaviors[target_origin_id] return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values())) except Exception as e: @@ -1778,40 +1943,79 @@ class CloudFrontValidationManager(object): cache_behavior = {} if cache_behavior is None and valid_origins is not None: return config - cache_behavior = self.validate_cache_behavior_first_level_keys(config, 
cache_behavior, valid_origins, is_default_cache) - cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior) - cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior) - cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior) - cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior) - cache_behavior = self.validate_field_level_encryption_id(config, cache_behavior.get('field_level_encryption_id'), cache_behavior) + cache_behavior = self.validate_cache_behavior_first_level_keys( + config, cache_behavior, valid_origins, is_default_cache + ) + if cache_behavior.get("cache_policy_id") is None: + cache_behavior = self.validate_forwarded_values( + config, cache_behavior.get("forwarded_values"), cache_behavior + ) + cache_behavior = self.validate_allowed_methods(config, cache_behavior.get("allowed_methods"), cache_behavior) + cache_behavior = self.validate_lambda_function_associations( + config, cache_behavior.get("lambda_function_associations"), cache_behavior + ) + cache_behavior = self.validate_trusted_signers(config, cache_behavior.get("trusted_signers"), cache_behavior) + cache_behavior = self.validate_field_level_encryption_id( + config, cache_behavior.get("field_level_encryption_id"), cache_behavior + ) return cache_behavior def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache): + if cache_behavior.get("cache_policy_id") is not None and cache_behavior.get("forwarded_values") is not None: + if is_default_cache: + cache_behavior_name = "Default cache behavior" + else: + cache_behavior_name = f"Cache behavior for path {cache_behavior['path_pattern']}" + self.module.fail_json( + msg=f"{cache_behavior_name} cannot have both a cache_policy_id and a forwarded_values option." 
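# Context, summarizing the guard above: cache_policy_id attaches a CloudFront
# cache policy, which supersedes the legacy forwarded_values/TTL settings, so
# validate_forwarded_values() and the min/max/default TTL defaults are only
# applied when no cache_policy_id is given.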
+ ) try: - cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l', - config.get('min_t_t_l', self.__default_cache_behavior_min_ttl)) - cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l', - config.get('max_t_t_l', self.__default_cache_behavior_max_ttl)) - cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l', - config.get('default_t_t_l', self.__default_cache_behavior_default_ttl)) - cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress)) - target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id')) + if cache_behavior.get("cache_policy_id") is None: + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, + "min_ttl", + "min_t_t_l", + config.get("min_t_t_l", self.__default_cache_behavior_min_ttl), + ) + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, + "max_ttl", + "max_t_t_l", + config.get("max_t_t_l", self.__default_cache_behavior_max_ttl), + ) + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, + "default_ttl", + "default_t_t_l", + config.get("default_t_t_l", self.__default_cache_behavior_default_ttl), + ) + cache_behavior = self.add_missing_key( + cache_behavior, "compress", config.get("compress", self.__default_cache_behavior_compress) + ) + target_origin_id = cache_behavior.get("target_origin_id", config.get("target_origin_id")) if not target_origin_id: target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins) - if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]: + if target_origin_id not in [origin["id"] for origin in valid_origins.get("items", [])]: if is_default_cache: - cache_behavior_name = 'Default cache behavior' + cache_behavior_name = "Default cache behavior" else: - cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern'] - self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." % - cache_behavior_name) - cache_behavior['target_origin_id'] = target_origin_id - cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy', - config.get('viewer_protocol_policy', - self.__default_cache_behavior_viewer_protocol_policy), - self.__valid_viewer_protocol_policies) - cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming', - config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming)) + cache_behavior_name = f"Cache behavior for path {cache_behavior['path_pattern']}" + self.module.fail_json( + msg=f"{cache_behavior_name} has target_origin_id pointing to an origin that does not exist." 
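# target_origin_id must match the id of one of valid_origins["items"]; when it
# is omitted, the first origin's id is substituted via
# get_first_origin_id_for_default_cache_behavior() above.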
+ ) + cache_behavior["target_origin_id"] = target_origin_id + cache_behavior = self.add_key_else_validate( + cache_behavior, + "viewer_protocol_policy", + "cache_behavior.viewer_protocol_policy", + config.get("viewer_protocol_policy", self.__default_cache_behavior_viewer_protocol_policy), + self.__valid_viewer_protocol_policies, + ) + cache_behavior = self.add_missing_key( + cache_behavior, + "smooth_streaming", + config.get("smooth_streaming", self.__default_cache_behavior_smooth_streaming), + ) return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys") @@ -1820,30 +2024,40 @@ class CloudFrontValidationManager(object): try: if not forwarded_values: forwarded_values = dict() - existing_config = config.get('forwarded_values', {}) - headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items')) + existing_config = config.get("forwarded_values", {}) + headers = forwarded_values.get("headers", existing_config.get("headers", {}).get("items")) if headers: headers.sort() - forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers) - if 'cookies' not in forwarded_values: - forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies) - forwarded_values['cookies'] = {'forward': forward} + forwarded_values["headers"] = ansible_list_to_cloudfront_list(headers) + if "cookies" not in forwarded_values: + forward = existing_config.get("cookies", {}).get( + "forward", self.__default_cache_behavior_forwarded_values_forward_cookies + ) + forwarded_values["cookies"] = {"forward": forward} else: - existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items') - whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist) + existing_whitelist = existing_config.get("cookies", {}).get("whitelisted_names", {}).get("items") + whitelist = forwarded_values.get("cookies").get("whitelisted_names", existing_whitelist) if whitelist: - self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names') - forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist) - cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward')) - self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward', - self.__valid_cookie_forwarding) - forwarded_values['cookies']['forward'] = cookie_forwarding - query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', [])) - self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys') - forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys) - forwarded_values = self.add_missing_key(forwarded_values, 'query_string', - existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string)) - cache_behavior['forwarded_values'] = forwarded_values + self.validate_is_list(whitelist, "forwarded_values.whitelisted_names") + forwarded_values["cookies"]["whitelisted_names"] = ansible_list_to_cloudfront_list(whitelist) + cookie_forwarding = forwarded_values.get("cookies").get( + "forward", existing_config.get("cookies", {}).get("forward") + ) + self.validate_attribute_with_allowed_values( + cookie_forwarding, 
"cache_behavior.forwarded_values.cookies.forward", self.__valid_cookie_forwarding + ) + forwarded_values["cookies"]["forward"] = cookie_forwarding + query_string_cache_keys = forwarded_values.get( + "query_string_cache_keys", existing_config.get("query_string_cache_keys", {}).get("items", []) + ) + self.validate_is_list(query_string_cache_keys, "forwarded_values.query_string_cache_keys") + forwarded_values["query_string_cache_keys"] = ansible_list_to_cloudfront_list(query_string_cache_keys) + forwarded_values = self.add_missing_key( + forwarded_values, + "query_string", + existing_config.get("query_string", self.__default_cache_behavior_forwarded_values_query_string), + ) + cache_behavior["forwarded_values"] = forwarded_values return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating forwarded values") @@ -1851,57 +2065,68 @@ class CloudFrontValidationManager(object): def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior): try: if lambda_function_associations is not None: - self.validate_is_list(lambda_function_associations, 'lambda_function_associations') + self.validate_is_list(lambda_function_associations, "lambda_function_associations") for association in lambda_function_associations: - association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n') - self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type', - self.__valid_lambda_function_association_event_types) - cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations) + association = change_dict_key_name(association, "lambda_function_arn", "lambda_function_a_r_n") + self.validate_attribute_with_allowed_values( + association.get("event_type"), + "cache_behaviors[].lambda_function_associations.event_type", + self.__valid_lambda_function_association_event_types, + ) + cache_behavior["lambda_function_associations"] = ansible_list_to_cloudfront_list( + lambda_function_associations + ) else: - if 'lambda_function_associations' in config: - cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations') + if "lambda_function_associations" in config: + cache_behavior["lambda_function_associations"] = config.get("lambda_function_associations") else: - cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([]) + cache_behavior["lambda_function_associations"] = ansible_list_to_cloudfront_list([]) return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating lambda function associations") def validate_field_level_encryption_id(self, config, field_level_encryption_id, cache_behavior): if field_level_encryption_id is not None: - cache_behavior['field_level_encryption_id'] = field_level_encryption_id - elif 'field_level_encryption_id' in config: - cache_behavior['field_level_encryption_id'] = config.get('field_level_encryption_id') + cache_behavior["field_level_encryption_id"] = field_level_encryption_id + elif "field_level_encryption_id" in config: + cache_behavior["field_level_encryption_id"] = config.get("field_level_encryption_id") else: - cache_behavior['field_level_encryption_id'] = "" + cache_behavior["field_level_encryption_id"] = "" return cache_behavior def validate_allowed_methods(self, config, allowed_methods, cache_behavior): try: if allowed_methods is not None: - self.validate_required_key('items', 
'cache_behavior.allowed_methods.items[]', allowed_methods) - temp_allowed_items = allowed_methods.get('items') - self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items') - self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]', - self.__valid_methods_allowed_methods) - cached_items = allowed_methods.get('cached_methods') - if 'cached_methods' in allowed_methods: - self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods') - self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_items.cached_methods[]', - self.__valid_methods_cached_methods) + self.validate_required_key("items", "cache_behavior.allowed_methods.items[]", allowed_methods) + temp_allowed_items = allowed_methods.get("items") + self.validate_is_list(temp_allowed_items, "cache_behavior.allowed_methods.items") + self.validate_attribute_list_with_allowed_list( + temp_allowed_items, "cache_behavior.allowed_methods.items[]", self.__valid_methods_allowed_methods + ) + cached_items = allowed_methods.get("cached_methods") + if "cached_methods" in allowed_methods: + self.validate_is_list(cached_items, "cache_behavior.allowed_methods.cached_methods") + self.validate_attribute_list_with_allowed_list( + cached_items, + "cache_behavior.allowed_items.cached_methods[]", + self.__valid_methods_cached_methods, + ) # we don't care if the order of how cloudfront stores the methods differs - preserving existing # order reduces likelihood of making unnecessary changes - if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items): - cache_behavior['allowed_methods'] = config['allowed_methods'] + if "allowed_methods" in config and set(config["allowed_methods"]["items"]) == set(temp_allowed_items): + cache_behavior["allowed_methods"] = config["allowed_methods"] else: - cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items) + cache_behavior["allowed_methods"] = ansible_list_to_cloudfront_list(temp_allowed_items) - if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])): - cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods'] + if cached_items and set(cached_items) == set( + config.get("allowed_methods", {}).get("cached_methods", {}).get("items", []) + ): + cache_behavior["allowed_methods"]["cached_methods"] = config["allowed_methods"]["cached_methods"] else: - cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items) + cache_behavior["allowed_methods"]["cached_methods"] = ansible_list_to_cloudfront_list(cached_items) else: - if 'allowed_methods' in config: - cache_behavior['allowed_methods'] = config.get('allowed_methods') + if "allowed_methods" in config: + cache_behavior["allowed_methods"] = config.get("allowed_methods") return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating allowed methods") @@ -1910,14 +2135,16 @@ class CloudFrontValidationManager(object): try: if trusted_signers is None: trusted_signers = {} - if 'items' in trusted_signers: - valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items')) + if "items" in trusted_signers: + valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get("items")) else: - valid_trusted_signers = dict(quantity=config.get('quantity', 0)) - if 'items' in config: - 
valid_trusted_signers = dict(items=config['items']) - valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled)) - cache_behavior['trusted_signers'] = valid_trusted_signers + valid_trusted_signers = dict(quantity=config.get("quantity", 0)) + if "items" in config: + valid_trusted_signers = dict(items=config["items"]) + valid_trusted_signers["enabled"] = trusted_signers.get( + "enabled", config.get("enabled", self.__default_trusted_signers_enabled) + ) + cache_behavior["trusted_signers"] = valid_trusted_signers return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating trusted signers") @@ -1926,19 +2153,37 @@ class CloudFrontValidationManager(object): try: if viewer_certificate is None: return None - if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None: - self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" + - "_certificate set to true.") - self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method', - self.__valid_viewer_certificate_ssl_support_methods) - self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version', - self.__valid_viewer_certificate_minimum_protocol_versions) - self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source', - self.__valid_viewer_certificate_certificate_sources) - viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate') - viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method') - viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id') - viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn') + if ( + viewer_certificate.get("cloudfront_default_certificate") + and viewer_certificate.get("ssl_support_method") is not None + ): + self.module.fail_json( + msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" + + "_certificate set to true." 
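# Background for this check: ssl_support_method ("sni-only" or "vip") only
# applies to a custom ACM/IAM certificate; CloudFront rejects it in combination
# with the default CloudFront certificate, so the module fails fast here.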
+ ) + self.validate_attribute_with_allowed_values( + viewer_certificate.get("ssl_support_method"), + "viewer_certificate.ssl_support_method", + self.__valid_viewer_certificate_ssl_support_methods, + ) + self.validate_attribute_with_allowed_values( + viewer_certificate.get("minimum_protocol_version"), + "viewer_certificate.minimum_protocol_version", + self.__valid_viewer_certificate_minimum_protocol_versions, + ) + self.validate_attribute_with_allowed_values( + viewer_certificate.get("certificate_source"), + "viewer_certificate.certificate_source", + self.__valid_viewer_certificate_certificate_sources, + ) + viewer_certificate = change_dict_key_name( + viewer_certificate, "cloudfront_default_certificate", "cloud_front_default_certificate" + ) + viewer_certificate = change_dict_key_name(viewer_certificate, "ssl_support_method", "s_s_l_support_method") + viewer_certificate = change_dict_key_name(viewer_certificate, "iam_certificate_id", "i_a_m_certificate_id") + viewer_certificate = change_dict_key_name( + viewer_certificate, "acm_certificate_arn", "a_c_m_certificate_arn" + ) return viewer_certificate except Exception as e: self.module.fail_json_aws(e, msg="Error validating viewer certificate") @@ -1947,16 +2192,18 @@ class CloudFrontValidationManager(object): try: if custom_error_responses is None and not purge_custom_error_responses: return ansible_list_to_cloudfront_list(config) - self.validate_is_list(custom_error_responses, 'custom_error_responses') + self.validate_is_list(custom_error_responses, "custom_error_responses") result = list() - existing_responses = dict((response['error_code'], response) for response in custom_error_responses) + existing_responses = dict((response["error_code"], response) for response in custom_error_responses) for custom_error_response in custom_error_responses: - self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response) - custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l') - if 'response_code' in custom_error_response: - custom_error_response['response_code'] = str(custom_error_response['response_code']) - if custom_error_response['error_code'] in existing_responses: - del existing_responses[custom_error_response['error_code']] + self.validate_required_key("error_code", "custom_error_responses[].error_code", custom_error_response) + custom_error_response = change_dict_key_name( + custom_error_response, "error_caching_min_ttl", "error_caching_min_t_t_l" + ) + if "response_code" in custom_error_response: + custom_error_response["response_code"] = str(custom_error_response["response_code"]) + if custom_error_response["error_code"] in existing_responses: + del existing_responses[custom_error_response["error_code"]] result.append(custom_error_response) if not purge_custom_error_responses: result.extend(existing_responses.values()) @@ -1972,54 +2219,72 @@ class CloudFrontValidationManager(object): return None else: return config - self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions) - geo_restriction = restrictions.get('geo_restriction') - self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction) - existing_restrictions = config.get('geo_restriction', {}).get(geo_restriction['restriction_type'], {}).get('items', []) - geo_restriction_items = geo_restriction.get('items') + self.validate_required_key("geo_restriction", "restrictions.geo_restriction", 
restrictions) + geo_restriction = restrictions.get("geo_restriction") + self.validate_required_key( + "restriction_type", "restrictions.geo_restriction.restriction_type", geo_restriction + ) + existing_restrictions = ( + config.get("geo_restriction", {}).get(geo_restriction["restriction_type"], {}).get("items", []) + ) + geo_restriction_items = geo_restriction.get("items") if not purge_restrictions: - geo_restriction_items.extend([rest for rest in existing_restrictions if - rest not in geo_restriction_items]) + geo_restriction_items.extend( + [rest for rest in existing_restrictions if rest not in geo_restriction_items] + ) valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items) - valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type') - return {'geo_restriction': valid_restrictions} + valid_restrictions["restriction_type"] = geo_restriction.get("restriction_type") + return {"geo_restriction": valid_restrictions} except Exception as e: self.module.fail_json_aws(e, msg="Error validating restrictions") - def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id): + def validate_distribution_config_parameters( + self, config, default_root_object, ipv6_enabled, http_version, web_acl_id + ): try: - config['default_root_object'] = default_root_object or config.get('default_root_object', '') - config['is_i_p_v6_enabled'] = ipv6_enabled if ipv6_enabled is not None else config.get('is_i_p_v6_enabled', self.__default_ipv6_enabled) - if http_version is not None or config.get('http_version'): - self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions) - config['http_version'] = http_version or config.get('http_version') - if web_acl_id or config.get('web_a_c_l_id'): - config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id') + config["default_root_object"] = default_root_object or config.get("default_root_object", "") + config["is_i_p_v6_enabled"] = ( + ipv6_enabled + if ipv6_enabled is not None + else config.get("is_i_p_v6_enabled", self.__default_ipv6_enabled) + ) + if http_version is not None or config.get("http_version"): + self.validate_attribute_with_allowed_values(http_version, "http_version", self.__valid_http_versions) + config["http_version"] = http_version or config.get("http_version") + if web_acl_id or config.get("web_a_c_l_id"): + config["web_a_c_l_id"] = web_acl_id or config.get("web_a_c_l_id") return config except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution config parameters") - def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False): + def validate_common_distribution_parameters( + self, config, enabled, aliases, logging, price_class, purge_aliases=False + ): try: if config is None: config = {} if aliases is not None: if not purge_aliases: - aliases.extend([alias for alias in config.get('aliases', {}).get('items', []) - if alias not in aliases]) - config['aliases'] = ansible_list_to_cloudfront_list(aliases) + aliases.extend( + [alias for alias in config.get("aliases", {}).get("items", []) if alias not in aliases] + ) + config["aliases"] = ansible_list_to_cloudfront_list(aliases) if logging is not None: - config['logging'] = self.validate_logging(logging) - config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled) + config["logging"] = self.validate_logging(logging) + config["enabled"] = ( + enabled if 
enabled is not None else config.get("enabled", self.__default_distribution_enabled) + ) if price_class is not None: - self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes) - config['price_class'] = price_class + self.validate_attribute_with_allowed_values(price_class, "price_class", self.__valid_price_classes) + config["price_class"] = price_class return config except Exception as e: self.module.fail_json_aws(e, msg="Error validating common distribution parameters") def validate_comment(self, config, comment): - config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string) + config["comment"] = comment or config.get( + "comment", "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string + ) return config def validate_caller_reference(self, caller_reference): @@ -2028,37 +2293,52 @@ class CloudFrontValidationManager(object): def get_first_origin_id_for_default_cache_behavior(self, valid_origins): try: if valid_origins is not None: - valid_origins_list = valid_origins.get('items') - if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0: - return str(valid_origins_list[0].get('id')) - self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.") + valid_origins_list = valid_origins.get("items") + if ( + valid_origins_list is not None + and isinstance(valid_origins_list, list) + and len(valid_origins_list) > 0 + ): + return str(valid_origins_list[0].get("id")) + self.module.fail_json( + msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration." 
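# Reached only when valid_origins has no items: the default cache behavior
# always needs a target origin, supplied via either origins[] or
# default_origin_domain_name.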
+ ) except Exception as e: self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior") def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list): try: self.validate_is_list(attribute_list, attribute_list_name) - if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or - isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)): - self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list))) + if ( + isinstance(allowed_list, list) + and set(attribute_list) not in allowed_list + or isinstance(allowed_list, set) + and not set(allowed_list).issuperset(attribute_list) + ): + attribute_list = " ".join(str(a) for a in allowed_list) + self.module.fail_json(msg=f"The attribute list {attribute_list_name} must be one of [{attribute_list}]") except Exception as e: self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list") def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list): if attribute is not None and attribute not in allowed_list: - self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list))) + attribute_list = " ".join(str(a) for a in allowed_list) + self.module.fail_json(msg=f"The attribute {attribute_name} must be one of [{attribute_list}]") def validate_distribution_from_caller_reference(self, caller_reference): try: - distributions = self.__cloudfront_facts_mgr.list_distributions(False) - distribution_name = 'Distribution' - distribution_config_name = 'DistributionConfig' - distribution_ids = [dist.get('Id') for dist in distributions] + distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False) + distribution_name = "Distribution" + distribution_config_name = "DistributionConfig" + distribution_ids = [dist.get("Id") for dist in distributions] for distribution_id in distribution_ids: - distribution = self.__cloudfront_facts_mgr.get_distribution(distribution_id) + distribution = self.__cloudfront_facts_mgr.get_distribution(id=distribution_id) if distribution is not None: distribution_config = distribution[distribution_name].get(distribution_config_name) - if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference: + if ( + distribution_config is not None + and distribution_config.get("CallerReference") == caller_reference + ): distribution[distribution_name][distribution_config_name] = distribution_config return distribution @@ -2073,68 +2353,73 @@ class CloudFrontValidationManager(object): if aliases and distribution_id is None: distribution_id = self.validate_distribution_id_from_alias(aliases) if distribution_id: - return self.__cloudfront_facts_mgr.get_distribution(distribution_id) + return self.__cloudfront_facts_mgr.get_distribution(id=distribution_id) return None except Exception as e: - self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference") + self.module.fail_json_aws( + e, msg="Error validating distribution_id from alias, aliases and caller reference" + ) def validate_distribution_id_from_alias(self, aliases): - distributions = self.__cloudfront_facts_mgr.list_distributions(False) + distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False) if distributions: for distribution in distributions: - distribution_aliases 
= distribution.get('Aliases', {}).get('Items', []) + distribution_aliases = distribution.get("Aliases", {}).get("Items", []) if set(aliases) & set(distribution_aliases): - return distribution['Id'] + return distribution["Id"] return None def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference): if distribution_id is None: - distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)['Id'] + distribution = self.validate_distribution_from_caller_reference(caller_reference=caller_reference) + distribution_id = distribution["Distribution"]["Id"] try: - waiter = client.get_waiter('distribution_deployed') + waiter = client.get_waiter("distribution_deployed") attempts = 1 + int(wait_timeout / 60) - waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts}) + waiter.wait(Id=distribution_id, WaiterConfig={"MaxAttempts": attempts}) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, msg="Timeout waiting for CloudFront action." - " Waited for {0} seconds before timeout.".format(to_text(wait_timeout))) + self.module.fail_json_aws( + e, + msg=f"Timeout waiting for CloudFront action. Waited for {to_text(wait_timeout)} seconds before timeout.", + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id)) + self.module.fail_json_aws(e, msg=f"Error getting distribution {distribution_id}") def main(): argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), + state=dict(choices=["present", "absent"], default="present"), caller_reference=dict(), comment=dict(), distribution_id=dict(), e_tag=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), alias=dict(), - aliases=dict(type='list', default=[], elements='str'), - purge_aliases=dict(type='bool', default=False), + aliases=dict(type="list", default=[], elements="str"), + purge_aliases=dict(type="bool", default=False), default_root_object=dict(), - origins=dict(type='list', elements='dict'), - purge_origins=dict(type='bool', default=False), - default_cache_behavior=dict(type='dict'), - cache_behaviors=dict(type='list', elements='dict'), - purge_cache_behaviors=dict(type='bool', default=False), - custom_error_responses=dict(type='list', elements='dict'), - purge_custom_error_responses=dict(type='bool', default=False), - logging=dict(type='dict'), + origins=dict(type="list", elements="dict"), + purge_origins=dict(type="bool", default=False), + default_cache_behavior=dict(type="dict"), + cache_behaviors=dict(type="list", elements="dict"), + purge_cache_behaviors=dict(type="bool", default=False), + custom_error_responses=dict(type="list", elements="dict"), + purge_custom_error_responses=dict(type="bool", default=False), + logging=dict(type="dict"), price_class=dict(), - enabled=dict(type='bool'), - viewer_certificate=dict(type='dict'), - restrictions=dict(type='dict'), + enabled=dict(type="bool"), + viewer_certificate=dict(type="dict"), + restrictions=dict(type="dict"), web_acl_id=dict(), http_version=dict(), - ipv6_enabled=dict(type='bool'), + ipv6_enabled=dict(type="bool"), default_origin_domain_name=dict(), default_origin_path=dict(), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=1800, type='int') + wait=dict(default=False, type="bool"), 
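# Rough polling budget implied by wait_timeout (see wait_until_processed
# above): the distribution_deployed waiter polls roughly once a minute, so
# MaxAttempts = 1 + int(wait_timeout / 60); the default of 1800s gives 31 polls.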
+ wait_timeout=dict(default=1800, type="int"), ) result = {} @@ -2144,129 +2429,154 @@ def main(): argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[ - ['distribution_id', 'alias'], - ['default_origin_domain_name', 'distribution_id'], - ['default_origin_domain_name', 'alias'], - ] + ["distribution_id", "alias"], + ["default_origin_domain_name", "distribution_id"], + ["default_origin_domain_name", "alias"], + ], ) - client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("cloudfront", retry_decorator=AWSRetry.jittered_backoff()) validation_mgr = CloudFrontValidationManager(module) - state = module.params.get('state') - caller_reference = module.params.get('caller_reference') - comment = module.params.get('comment') - e_tag = module.params.get('e_tag') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - distribution_id = module.params.get('distribution_id') - alias = module.params.get('alias') - aliases = module.params.get('aliases') - purge_aliases = module.params.get('purge_aliases') - default_root_object = module.params.get('default_root_object') - origins = module.params.get('origins') - purge_origins = module.params.get('purge_origins') - default_cache_behavior = module.params.get('default_cache_behavior') - cache_behaviors = module.params.get('cache_behaviors') - purge_cache_behaviors = module.params.get('purge_cache_behaviors') - custom_error_responses = module.params.get('custom_error_responses') - purge_custom_error_responses = module.params.get('purge_custom_error_responses') - logging = module.params.get('logging') - price_class = module.params.get('price_class') - enabled = module.params.get('enabled') - viewer_certificate = module.params.get('viewer_certificate') - restrictions = module.params.get('restrictions') - purge_restrictions = module.params.get('purge_restrictions') - web_acl_id = module.params.get('web_acl_id') - http_version = module.params.get('http_version') - ipv6_enabled = module.params.get('ipv6_enabled') - default_origin_domain_name = module.params.get('default_origin_domain_name') - default_origin_path = module.params.get('default_origin_path') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') + state = module.params.get("state") + caller_reference = module.params.get("caller_reference") + comment = module.params.get("comment") + e_tag = module.params.get("e_tag") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + distribution_id = module.params.get("distribution_id") + alias = module.params.get("alias") + aliases = module.params.get("aliases") + purge_aliases = module.params.get("purge_aliases") + default_root_object = module.params.get("default_root_object") + origins = module.params.get("origins") + purge_origins = module.params.get("purge_origins") + default_cache_behavior = module.params.get("default_cache_behavior") + cache_behaviors = module.params.get("cache_behaviors") + purge_cache_behaviors = module.params.get("purge_cache_behaviors") + custom_error_responses = module.params.get("custom_error_responses") + purge_custom_error_responses = module.params.get("purge_custom_error_responses") + logging = module.params.get("logging") + price_class = module.params.get("price_class") + enabled = module.params.get("enabled") + viewer_certificate = module.params.get("viewer_certificate") + restrictions = module.params.get("restrictions") + purge_restrictions = 
module.params.get("purge_restrictions") + web_acl_id = module.params.get("web_acl_id") + http_version = module.params.get("http_version") + ipv6_enabled = module.params.get("ipv6_enabled") + default_origin_domain_name = module.params.get("default_origin_domain_name") + default_origin_path = module.params.get("default_origin_path") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") if alias and alias not in aliases: aliases.append(alias) - distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference) + distribution = validation_mgr.validate_distribution_from_aliases_caller_reference( + distribution_id, aliases, caller_reference + ) - update = state == 'present' and distribution - create = state == 'present' and not distribution - delete = state == 'absent' and distribution + update = state == "present" and distribution + create = state == "present" and not distribution + delete = state == "absent" and distribution if not (update or create or delete): module.exit_json(changed=False) + config = {} if update or delete: - config = distribution['Distribution']['DistributionConfig'] - e_tag = distribution['ETag'] - distribution_id = distribution['Distribution']['Id'] - else: - config = dict() + config = distribution["Distribution"]["DistributionConfig"] + e_tag = distribution["ETag"] + distribution_id = distribution["Distribution"]["Id"] + if update: config = camel_dict_to_snake_dict(config, reversible=True) if create or update: - config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases) - config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id) - config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name, - default_origin_path, create, purge_origins) - config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []), - cache_behaviors, config['origins'], purge_cache_behaviors) - config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}), - default_cache_behavior, config['origins'], True) - config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []), - custom_error_responses, purge_custom_error_responses) - valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions) + config = validation_mgr.validate_common_distribution_parameters( + config, enabled, aliases, logging, price_class, purge_aliases + ) + config = validation_mgr.validate_distribution_config_parameters( + config, default_root_object, ipv6_enabled, http_version, web_acl_id + ) + config["origins"] = validation_mgr.validate_origins( + client, + config.get("origins", {}).get("items", []), + origins, + default_origin_domain_name, + default_origin_path, + create, + purge_origins, + ) + config["cache_behaviors"] = validation_mgr.validate_cache_behaviors( + config.get("cache_behaviors", {}).get("items", []), + cache_behaviors, + config["origins"], + purge_cache_behaviors, + ) + config["default_cache_behavior"] = validation_mgr.validate_cache_behavior( + config.get("default_cache_behavior", {}), default_cache_behavior, config["origins"], True + ) + 
config["custom_error_responses"] = validation_mgr.validate_custom_error_responses( + config.get("custom_error_responses", {}).get("items", []), + custom_error_responses, + purge_custom_error_responses, + ) + valid_restrictions = validation_mgr.validate_restrictions( + config.get("restrictions", {}), restrictions, purge_restrictions + ) if valid_restrictions: - config['restrictions'] = valid_restrictions + config["restrictions"] = valid_restrictions valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate) - config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate') + config = merge_validation_into_config(config, valid_viewer_certificate, "viewer_certificate") config = validation_mgr.validate_comment(config, comment) config = snake_dict_to_camel_dict(config, capitalize_first=True) if create: - config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference) + config["CallerReference"] = validation_mgr.validate_caller_reference(caller_reference) result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags or {})) result = camel_dict_to_snake_dict(result) - result['tags'] = list_tags_for_resource(client, module, result['arn']) + result["tags"] = list_tags_for_resource(client, module, result["arn"]) if delete: - if config['Enabled']: - config['Enabled'] = False + if config["Enabled"]: + config["Enabled"] = False result = update_distribution(client, module, config, distribution_id, e_tag) - validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference')) - distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference) + validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get("CallerReference")) + distribution = validation_mgr.validate_distribution_from_aliases_caller_reference( + distribution_id, aliases, caller_reference + ) # e_tag = distribution['ETag'] result = delete_distribution(client, module, distribution) if update: - changed = config != distribution['Distribution']['DistributionConfig'] + changed = config != distribution["Distribution"]["DistributionConfig"] if changed: result = update_distribution(client, module, config, distribution_id, e_tag) else: - result = distribution['Distribution'] - existing_tags = list_tags_for_resource(client, module, result['ARN']) - distribution['Distribution']['DistributionConfig']['tags'] = existing_tags - changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN']) + result = distribution["Distribution"] + existing_tags = list_tags_for_resource(client, module, result["ARN"]) + distribution["Distribution"]["DistributionConfig"]["tags"] = existing_tags + changed |= update_tags(client, module, existing_tags, tags, purge_tags, result["ARN"]) result = camel_dict_to_snake_dict(result) - result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn']) - result['diff'] = dict() - diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config) + result["distribution_config"]["tags"] = config["tags"] = list_tags_for_resource(client, module, result["arn"]) + result["diff"] = dict() + diff = recursive_diff(distribution["Distribution"]["DistributionConfig"], config) if diff: - result['diff']['before'] = diff[0] - result['diff']['after'] = diff[1] + result["diff"]["before"] = diff[0] + result["diff"]["after"] = diff[1] if wait and 
(create or update): - validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference')) + validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get("CallerReference")) - if 'distribution_config' in result: - result.update(result['distribution_config']) - del result['distribution_config'] + if "distribution_config" in result: + result.update(result["distribution_config"]) + del result["distribution_config"] module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py index cb97664fa..3bd20868a 100644 --- a/ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py +++ b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudfront_distribution_info version_added: 1.0.0 @@ -143,12 +141,12 @@ options: type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Get a summary of distributions @@ -191,9 +189,9 @@ EXAMPLES = ''' - name: Get all information about lists not requiring parameters (ie. list_origin_access_identities, list_distributions, list_streaming_distributions) community.aws.cloudfront_distribution_info: all_lists: true -''' +""" -RETURN = ''' +RETURN = r""" origin_access_identity: description: Describes the origin access identity information. Requires I(origin_access_identity_id) to be set. returned: only if I(origin_access_identity) is true @@ -242,405 +240,169 @@ result: as figuring out the DistributionId is usually the reason one uses this module in the first place. 
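# A minimal sketch of the change detection used in cloudfront_distribution's update
# path above: recursive_diff() (shipped with ansible-core, assumed installed) returns
# None when two dicts match, otherwise a (before, after) tuple holding only the
# differing keys -- which is exactly what feeds result['diff'] in the module code.
from ansible.module_utils.common.dict_transformations import recursive_diff

existing_config = {"Enabled": True, "Comment": "old comment", "PriceClass": "PriceClass_All"}
desired_config = {"Enabled": True, "Comment": "new comment", "PriceClass": "PriceClass_All"}

diff = recursive_diff(existing_config, desired_config)
if diff:
    # Mirrors result['diff']['before'] / result['diff']['after'] above.
    print({"before": diff[0], "after": diff[1]})
    # -> {'before': {'Comment': 'old comment'}, 'after': {'Comment': 'new comment'}}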
returned: always type: dict -''' - -import traceback - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - - -class CloudFrontServiceManager: - """Handles CloudFront Services""" - - def __init__(self, module): - self.module = module - - try: - self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - def get_distribution(self, distribution_id): - try: - distribution = self.client.get_distribution(aws_retry=True, Id=distribution_id) - return distribution - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing distribution") - - def get_distribution_config(self, distribution_id): - try: - distribution = self.client.get_distribution_config(aws_retry=True, Id=distribution_id) - return distribution - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing distribution configuration") - - def get_origin_access_identity(self, origin_access_identity_id): - try: - origin_access_identity = self.client.get_cloud_front_origin_access_identity(aws_retry=True, Id=origin_access_identity_id) - return origin_access_identity - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing origin access identity") - - def get_origin_access_identity_config(self, origin_access_identity_id): - try: - origin_access_identity = self.client.get_cloud_front_origin_access_identity_config(aws_retry=True, Id=origin_access_identity_id) - return origin_access_identity - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing origin access identity configuration") - - def get_invalidation(self, distribution_id, invalidation_id): - try: - invalidation = self.client.get_invalidation(aws_retry=True, DistributionId=distribution_id, Id=invalidation_id) - return invalidation - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing invalidation") - - def get_streaming_distribution(self, distribution_id): - try: - streaming_distribution = self.client.get_streaming_distribution(aws_retry=True, Id=distribution_id) - return streaming_distribution - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing streaming distribution") - - def get_streaming_distribution_config(self, distribution_id): - try: - streaming_distribution = self.client.get_streaming_distribution_config(aws_retry=True, Id=distribution_id) - return streaming_distribution - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing streaming distribution") - - # Split out paginator to allow for the backoff decorator to function - @AWSRetry.jittered_backoff() - def _paginated_result(self, paginator_name, **params): - paginator = 
self.client.get_paginator(paginator_name) - results = paginator.paginate(**params).build_full_result() - return results - - def list_origin_access_identities(self): - try: - results = self._paginated_result('list_cloud_front_origin_access_identities') - origin_access_identity_list = results.get('CloudFrontOriginAccessIdentityList', {'Items': []}) - - if len(origin_access_identity_list['Items']) > 0: - return origin_access_identity_list['Items'] - return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities") - - def list_distributions(self, keyed=True): - try: - results = self._paginated_result('list_distributions') - distribution_list = results.get('DistributionList', {'Items': []}) - - if len(distribution_list['Items']) > 0: - distribution_list = distribution_list['Items'] - else: - return {} - - if not keyed: - return distribution_list - return self.keyed_list_helper(distribution_list) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error listing distributions") - - def list_distributions_by_web_acl_id(self, web_acl_id): - try: - results = self._paginated_result('list_cloud_front_origin_access_identities', WebAclId=web_acl_id) - distribution_list = results.get('DistributionList', {'Items': []}) - - if len(distribution_list['Items']) > 0: - distribution_list = distribution_list['Items'] - else: - return {} - return self.keyed_list_helper(distribution_list) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error listing distributions by web acl id") - - def list_invalidations(self, distribution_id): - try: - results = self._paginated_result('list_invalidations', DistributionId=distribution_id) - invalidation_list = results.get('InvalidationList', {'Items': []}) - - if len(invalidation_list['Items']) > 0: - return invalidation_list['Items'] - return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error listing invalidations") - - def list_streaming_distributions(self, keyed=True): - try: - results = self._paginated_result('list_streaming_distributions') - streaming_distribution_list = results.get('StreamingDistributionList', {'Items': []}) - - if len(streaming_distribution_list['Items']) > 0: - streaming_distribution_list = streaming_distribution_list['Items'] - else: - return {} - - if not keyed: - return streaming_distribution_list - return self.keyed_list_helper(streaming_distribution_list) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error listing streaming distributions") - - def summary(self): - summary_dict = {} - summary_dict.update(self.summary_get_distribution_list(False)) - summary_dict.update(self.summary_get_distribution_list(True)) - summary_dict.update(self.summary_get_origin_access_identity_list()) - return summary_dict - - def summary_get_origin_access_identity_list(self): - try: - origin_access_identity_list = {'origin_access_identities': []} - origin_access_identities = self.list_origin_access_identities() - for origin_access_identity in origin_access_identities: - oai_id = origin_access_identity['Id'] - oai_full_response = self.get_origin_access_identity(oai_id) - oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']} - 
origin_access_identity_list['origin_access_identities'].append(oai_summary) - return origin_access_identity_list - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error generating summary of origin access identities") - - def summary_get_distribution_list(self, streaming=False): - try: - list_name = 'streaming_distributions' if streaming else 'distributions' - key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled'] - distribution_list = {list_name: []} - distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False) - for dist in distributions: - temp_distribution = {} - for key_name in key_list: - temp_distribution[key_name] = dist[key_name] - temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])] - temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming) - if not streaming: - temp_distribution['WebACLId'] = dist['WebACLId'] - invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id']) - if invalidation_ids: - temp_distribution['Invalidations'] = invalidation_ids - resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN']) - temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', [])) - distribution_list[list_name].append(temp_distribution) - return distribution_list - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error generating summary of distributions") - except Exception as e: - self.module.fail_json(msg="Error generating summary of distributions - " + str(e), - exception=traceback.format_exc()) - - def get_etag_from_distribution_id(self, distribution_id, streaming): - distribution = {} - if not streaming: - distribution = self.get_distribution(distribution_id) - else: - distribution = self.get_streaming_distribution(distribution_id) - return distribution['ETag'] - - def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id): - try: - invalidation_ids = [] - invalidations = self.list_invalidations(distribution_id) - for invalidation in invalidations: - invalidation_ids.append(invalidation['Id']) - return invalidation_ids - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting list of invalidation ids") - - def get_distribution_id_from_domain_name(self, domain_name): - try: - distribution_id = "" - distributions = self.list_distributions(False) - distributions += self.list_streaming_distributions(False) - for dist in distributions: - if 'Items' in dist['Aliases']: - for alias in dist['Aliases']['Items']: - if str(alias).lower() == domain_name.lower(): - distribution_id = dist['Id'] - break - return distribution_id - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting distribution id from domain name") - - def get_aliases_from_distribution_id(self, distribution_id): - aliases = [] - try: - distributions = self.list_distributions(False) - for dist in distributions: - if dist['Id'] == distribution_id and 'Items' in dist['Aliases']: - for alias in dist['Aliases']['Items']: - aliases.append(alias) - break - return aliases - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting 
list of aliases from distribution_id") - - def keyed_list_helper(self, list_to_key): - keyed_list = dict() - for item in list_to_key: - distribution_id = item['Id'] - if 'Items' in item['Aliases']: - aliases = item['Aliases']['Items'] - for alias in aliases: - keyed_list.update({alias: item}) - keyed_list.update({distribution_id: item}) - return keyed_list +""" + +from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases): - facts[distribution_id].update(details) + facts[distribution_id] = details # also have a fixed key for accessing results/details returned - facts['result'] = details - facts['result']['DistributionId'] = distribution_id + facts["result"] = details + facts["result"]["DistributionId"] = distribution_id for alias in aliases: - facts[alias].update(details) + facts[alias] = details return facts def main(): argument_spec = dict( - distribution_id=dict(required=False, type='str'), - invalidation_id=dict(required=False, type='str'), - origin_access_identity_id=dict(required=False, type='str'), - domain_name_alias=dict(required=False, type='str'), - all_lists=dict(required=False, default=False, type='bool'), - distribution=dict(required=False, default=False, type='bool'), - distribution_config=dict(required=False, default=False, type='bool'), - origin_access_identity=dict(required=False, default=False, type='bool'), - origin_access_identity_config=dict(required=False, default=False, type='bool'), - invalidation=dict(required=False, default=False, type='bool'), - streaming_distribution=dict(required=False, default=False, type='bool'), - streaming_distribution_config=dict(required=False, default=False, type='bool'), - list_origin_access_identities=dict(required=False, default=False, type='bool'), - list_distributions=dict(required=False, default=False, type='bool'), - list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'), - list_invalidations=dict(required=False, default=False, type='bool'), - list_streaming_distributions=dict(required=False, default=False, type='bool'), - summary=dict(required=False, default=False, type='bool'), + distribution_id=dict(required=False, type="str"), + invalidation_id=dict(required=False, type="str"), + origin_access_identity_id=dict(required=False, type="str"), + domain_name_alias=dict(required=False, type="str"), + all_lists=dict(required=False, default=False, type="bool"), + distribution=dict(required=False, default=False, type="bool"), + distribution_config=dict(required=False, default=False, type="bool"), + origin_access_identity=dict(required=False, default=False, type="bool"), + origin_access_identity_config=dict(required=False, default=False, type="bool"), + invalidation=dict(required=False, default=False, type="bool"), + streaming_distribution=dict(required=False, default=False, type="bool"), + streaming_distribution_config=dict(required=False, default=False, type="bool"), + list_origin_access_identities=dict(required=False, default=False, type="bool"), + list_distributions=dict(required=False, default=False, type="bool"), + list_distributions_by_web_acl_id=dict(required=False, default=False, type="bool"), + list_invalidations=dict(required=False, default=False, type="bool"), + list_streaming_distributions=dict(required=False, default=False, type="bool"), + 
summary=dict(required=False, default=False, type="bool"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - service_mgr = CloudFrontServiceManager(module) - - distribution_id = module.params.get('distribution_id') - invalidation_id = module.params.get('invalidation_id') - origin_access_identity_id = module.params.get('origin_access_identity_id') - web_acl_id = module.params.get('web_acl_id') - domain_name_alias = module.params.get('domain_name_alias') - all_lists = module.params.get('all_lists') - distribution = module.params.get('distribution') - distribution_config = module.params.get('distribution_config') - origin_access_identity = module.params.get('origin_access_identity') - origin_access_identity_config = module.params.get('origin_access_identity_config') - invalidation = module.params.get('invalidation') - streaming_distribution = module.params.get('streaming_distribution') - streaming_distribution_config = module.params.get('streaming_distribution_config') - list_origin_access_identities = module.params.get('list_origin_access_identities') - list_distributions = module.params.get('list_distributions') - list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id') - list_invalidations = module.params.get('list_invalidations') - list_streaming_distributions = module.params.get('list_streaming_distributions') - summary = module.params.get('summary') + service_mgr = CloudFrontFactsServiceManager(module) + + distribution_id = module.params.get("distribution_id") + invalidation_id = module.params.get("invalidation_id") + origin_access_identity_id = module.params.get("origin_access_identity_id") + web_acl_id = module.params.get("web_acl_id") + domain_name_alias = module.params.get("domain_name_alias") + all_lists = module.params.get("all_lists") + distribution = module.params.get("distribution") + distribution_config = module.params.get("distribution_config") + origin_access_identity = module.params.get("origin_access_identity") + origin_access_identity_config = module.params.get("origin_access_identity_config") + invalidation = module.params.get("invalidation") + streaming_distribution = module.params.get("streaming_distribution") + streaming_distribution_config = module.params.get("streaming_distribution_config") + list_origin_access_identities = module.params.get("list_origin_access_identities") + list_distributions = module.params.get("list_distributions") + list_distributions_by_web_acl_id = module.params.get("list_distributions_by_web_acl_id") + list_invalidations = module.params.get("list_invalidations") + list_streaming_distributions = module.params.get("list_streaming_distributions") + summary = module.params.get("summary") aliases = [] - result = {'cloudfront': {}} + result = {"cloudfront": {}} facts = {} - require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or - streaming_distribution_config or list_invalidations) + require_distribution_id = ( + distribution + or distribution_config + or invalidation + or streaming_distribution + or streaming_distribution_config + or list_invalidations + ) # set default to summary if no option specified - summary = summary or not (distribution or distribution_config or origin_access_identity or - origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or - list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or - list_streaming_distributions or 
list_distributions) + summary = summary or not ( + distribution + or distribution_config + or origin_access_identity + or origin_access_identity_config + or invalidation + or streaming_distribution + or streaming_distribution_config + or list_origin_access_identities + or list_distributions_by_web_acl_id + or list_invalidations + or list_streaming_distributions + or list_distributions + ) # validations if require_distribution_id and distribution_id is None and domain_name_alias is None: - module.fail_json(msg='Error distribution_id or domain_name_alias have not been specified.') - if (invalidation and invalidation_id is None): - module.fail_json(msg='Error invalidation_id has not been specified.') + module.fail_json(msg="Error distribution_id or domain_name_alias have not been specified.") + if invalidation and invalidation_id is None: + module.fail_json(msg="Error invalidation_id has not been specified.") if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None: - module.fail_json(msg='Error origin_access_identity_id has not been specified.') + module.fail_json(msg="Error origin_access_identity_id has not been specified.") if list_distributions_by_web_acl_id and web_acl_id is None: - module.fail_json(msg='Error web_acl_id has not been specified.') + module.fail_json(msg="Error web_acl_id has not been specified.") # get distribution id from domain name alias if require_distribution_id and distribution_id is None: distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias) if not distribution_id: - module.fail_json(msg='Error unable to source a distribution id from domain_name_alias') + module.fail_json(msg="Error unable to source a distribution id from domain_name_alias") # set appropriate cloudfront id - if distribution_id and not list_invalidations: - facts = {distribution_id: {}} - aliases = service_mgr.get_aliases_from_distribution_id(distribution_id) - for alias in aliases: - facts.update({alias: {}}) - if invalidation_id: - facts.update({invalidation_id: {}}) - elif distribution_id and list_invalidations: - facts = {distribution_id: {}} - aliases = service_mgr.get_aliases_from_distribution_id(distribution_id) - for alias in aliases: - facts.update({alias: {}}) - elif origin_access_identity_id: - facts = {origin_access_identity_id: {}} - elif web_acl_id: - facts = {web_acl_id: {}} + if invalidation_id is not None and invalidation: + facts.update({invalidation_id: {}}) + if origin_access_identity_id and (origin_access_identity or origin_access_identity_config): + facts.update({origin_access_identity_id: {}}) + if web_acl_id: + facts.update({web_acl_id: {}}) # get details based on options if distribution: - facts_to_set = service_mgr.get_distribution(distribution_id) + facts_to_set = service_mgr.get_distribution(id=distribution_id) if distribution_config: - facts_to_set = service_mgr.get_distribution_config(distribution_id) + facts_to_set = service_mgr.get_distribution_config(id=distribution_id) if origin_access_identity: - facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id)) + facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(id=origin_access_identity_id)) if origin_access_identity_config: - facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id)) + facts[origin_access_identity_id].update( + service_mgr.get_origin_access_identity_config(id=origin_access_identity_id) + ) if 
invalidation: - facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id) + facts_to_set = service_mgr.get_invalidation(distribution_id=distribution_id, id=invalidation_id) facts[invalidation_id].update(facts_to_set) if streaming_distribution: - facts_to_set = service_mgr.get_streaming_distribution(distribution_id) + facts_to_set = service_mgr.get_streaming_distribution(id=distribution_id) if streaming_distribution_config: - facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id) + facts_to_set = service_mgr.get_streaming_distribution_config(id=distribution_id) if list_invalidations: - facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)} - if 'facts_to_set' in vars(): + invalidations = service_mgr.list_invalidations(distribution_id=distribution_id) or {} + facts_to_set = {"invalidations": invalidations} + if "facts_to_set" in vars(): + aliases = service_mgr.get_aliases_from_distribution_id(distribution_id) facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases) # get list based on options if all_lists or list_origin_access_identities: - facts['origin_access_identities'] = service_mgr.list_origin_access_identities() + facts["origin_access_identities"] = service_mgr.list_origin_access_identities() or {} if all_lists or list_distributions: - facts['distributions'] = service_mgr.list_distributions() + facts["distributions"] = service_mgr.list_distributions() or {} if all_lists or list_streaming_distributions: - facts['streaming_distributions'] = service_mgr.list_streaming_distributions() + facts["streaming_distributions"] = service_mgr.list_streaming_distributions() or {} if list_distributions_by_web_acl_id: - facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id) + facts["distributions_by_web_acl_id"] = service_mgr.list_distributions_by_web_acl_id(web_acl_id=web_acl_id) or {} if list_invalidations: - facts['invalidations'] = service_mgr.list_invalidations(distribution_id) + facts["invalidations"] = service_mgr.list_invalidations(distribution_id=distribution_id) or {} # default summary option if summary: - facts['summary'] = service_mgr.summary() + facts["summary"] = service_mgr.summary() - result['changed'] = False - result['cloudfront'].update(facts) + result["changed"] = False + result["cloudfront"].update(facts) module.exit_json(msg="Retrieved CloudFront info.", **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py b/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py index 767a1d181..732d135e1 100644 --- a/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py +++ b/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- version_added: 1.0.0 @@ -14,15 +12,10 @@ module: cloudfront_invalidation short_description: create invalidations for AWS CloudFront distributions description: - - Allows for invalidation of a batch of paths for a CloudFront distribution. 
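# A hedged sketch (boto3 assumed installed; IDs and paths are placeholders) of the
# bare API call the cloudfront_invalidation module wraps. CloudFront treats
# CallerReference as an idempotency token: re-sending the same reference for the same
# distribution returns the earlier invalidation instead of creating a new one, which
# is why the module warns rather than fails on a reused caller_reference.
import boto3

client = boto3.client("cloudfront")
response = client.create_invalidation(
    DistributionId="E15BU8SDCGSG57",  # placeholder distribution id
    InvalidationBatch={
        "Paths": {"Quantity": 2, "Items": ["/testpathone/test1.css", "/testpathtwo/*"]},
        "CallerReference": "unique-reference-per-batch",  # reuse == idempotent no-op
    },
)
print(response["Invalidation"]["Status"])  # usually "InProgress" right after creation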
- -author: Willem van Ketwich (@wilvk) - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - Allows for invalidation of a batch of paths for a CloudFront distribution. +author: + - Willem van Ketwich (@wilvk) options: distribution_id: @@ -52,10 +45,13 @@ options: notes: - does not support check mode -''' - -EXAMPLES = r''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" +EXAMPLES = r""" - name: create a batch of invalidations using a distribution_id for a reference community.aws.cloudfront_invalidation: distribution_id: E15BU8SDCGSG57 @@ -73,10 +69,9 @@ EXAMPLES = r''' - /testpathone/test4.css - /testpathtwo/test5.js - /testpaththree/* +""" -''' - -RETURN = r''' +RETURN = r""" invalidation: description: The invalidation's information. returned: always @@ -130,7 +125,7 @@ location: returned: always type: str sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622 -''' +""" import datetime @@ -142,60 +137,61 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class CloudFrontInvalidationServiceManager(object): """ Handles CloudFront service calls to AWS for invalidations """ - def __init__(self, module): + def __init__(self, module, cloudfront_facts_mgr): self.module = module - self.client = module.client('cloudfront') + self.client = module.client("cloudfront") + self.__cloudfront_facts_mgr = cloudfront_facts_mgr def create_invalidation(self, distribution_id, invalidation_batch): - current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference']) + current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch["CallerReference"]) try: - response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch) - response.pop('ResponseMetadata', None) + response = self.client.create_invalidation( + DistributionId=distribution_id, InvalidationBatch=invalidation_batch + ) + response.pop("ResponseMetadata", None) if current_invalidation_response: return response, False else: return response, True - except is_boto3_error_message('Your request contains a caller reference that was used for a previous invalidation ' - 'batch for the same distribution.'): - self.module.warn("InvalidationBatch target paths are not modifiable. " - "To make a new invalidation please update caller_reference.") + except is_boto3_error_message( + "Your request contains a caller reference that was used for a previous invalidation " + "batch for the same distribution." + ): + self.module.warn( + "InvalidationBatch target paths are not modifiable. " + "To make a new invalidation please update caller_reference." 
+ ) return current_invalidation_response, False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.") def get_invalidation(self, distribution_id, caller_reference): - current_invalidation = {} # find all invalidations for the distribution - try: - paginator = self.client.get_paginator('list_invalidations') - invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', []) - invalidation_ids = [inv['Id'] for inv in invalidations] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.") + invalidations = self.__cloudfront_facts_mgr.list_invalidations(distribution_id=distribution_id) # check if there is an invalidation with the same caller reference - for inv_id in invalidation_ids: - try: - invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation'] - caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting CloudFront invalidation {0}".format(inv_id)) - if caller_ref == caller_reference: - current_invalidation = invalidation - break - - current_invalidation.pop('ResponseMetadata', None) - return current_invalidation + for invalidation in invalidations: + invalidation_info = self.__cloudfront_facts_mgr.get_invalidation( + distribution_id=distribution_id, id=invalidation["Id"] + ) + if invalidation_info.get("InvalidationBatch", {}).get("CallerReference") == caller_reference: + invalidation_info.pop("ResponseMetadata", None) + return invalidation_info + return {} class CloudFrontInvalidationValidationManager(object): @@ -203,9 +199,9 @@ class CloudFrontInvalidationValidationManager(object): Manages CloudFront validations for invalidation batches """ - def __init__(self, module): + def __init__(self, module, cloudfront_facts_mgr): self.module = module - self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) + self.__cloudfront_facts_mgr = cloudfront_facts_mgr def validate_distribution_id(self, distribution_id, alias): try: @@ -230,8 +226,8 @@ class CloudFrontInvalidationValidationManager(object): else: valid_caller_reference = datetime.datetime.now().isoformat() valid_invalidation_batch = { - 'paths': self.create_aws_list(invalidation_batch), - 'caller_reference': valid_caller_reference + "paths": self.create_aws_list(invalidation_batch), + "caller_reference": valid_caller_reference, } return valid_invalidation_batch except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -243,18 +239,21 @@ def main(): caller_reference=dict(), distribution_id=dict(), alias=dict(), - target_paths=dict(required=True, type='list', elements='str') + target_paths=dict(required=True, type="list", elements="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[["distribution_id", "alias"]] + ) - validation_mgr = 
CloudFrontInvalidationValidationManager(module) - service_mgr = CloudFrontInvalidationServiceManager(module) + cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) + validation_mgr = CloudFrontInvalidationValidationManager(module, cloudfront_facts_mgr) + service_mgr = CloudFrontInvalidationServiceManager(module, cloudfront_facts_mgr) - caller_reference = module.params.get('caller_reference') - distribution_id = module.params.get('distribution_id') - alias = module.params.get('alias') - target_paths = module.params.get('target_paths') + caller_reference = module.params.get("caller_reference") + distribution_id = module.params.get("distribution_id") + alias = module.params.get("alias") + target_paths = module.params.get("target_paths") result = {} @@ -266,5 +265,5 @@ def main(): module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py b/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py index c6879d0c5..bb5e3a017 100644 --- a/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py +++ b/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- version_added: 1.0.0 @@ -16,16 +14,11 @@ short_description: Create, update and delete origin access identities for a CloudFront distribution description: - - Allows for easy creation, updating and deletion of origin access - identities. - -author: Willem van Ketwich (@wilvk) - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - Allows for easy creation, updating and deletion of origin access + identities. +author: + - Willem van Ketwich (@wilvk) options: state: @@ -54,9 +47,13 @@ options: notes: - Does not support check mode. -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: create an origin access identity community.aws.cloudfront_origin_access_identity: @@ -66,19 +63,18 @@ EXAMPLES = ''' - name: update an existing origin access identity using caller_reference as an identifier community.aws.cloudfront_origin_access_identity: - origin_access_identity_id: E17DRN9XUOAHZX - caller_reference: this is an example reference - comment: this is a new comment + origin_access_identity_id: E17DRN9XUOAHZX + caller_reference: this is an example reference + comment: this is a new comment - name: delete an existing origin access identity using caller_reference as an identifier community.aws.cloudfront_origin_access_identity: - state: absent - caller_reference: this is an example reference - comment: this is a new comment - -''' + state: absent + caller_reference: this is an example reference + comment: this is a new comment +""" -RETURN = ''' +RETURN = r""" cloud_front_origin_access_identity: description: The origin access identity's information. returned: always @@ -113,20 +109,22 @@ location: description: The fully qualified URI of the new origin access identity just created. 
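# A hedged sketch (boto3 assumed installed) of the origin access identity lifecycle
# this module wraps. CloudFront guards mutations with ETags: update and delete must
# echo the ETag of the current config via IfMatch, which is the e_tag threading
# visible throughout the diff below.
import boto3

client = boto3.client("cloudfront")
created = client.create_cloud_front_origin_access_identity(
    CloudFrontOriginAccessIdentityConfig={
        "CallerReference": "example-ref",  # placeholder idempotency token
        "Comment": "illustrative OAI",
    }
)
oai_id = created["CloudFrontOriginAccessIdentity"]["Id"]
# A fresh ETag must be fetched before any mutation.
etag = client.get_cloud_front_origin_access_identity(Id=oai_id)["ETag"]
client.delete_cloud_front_origin_access_identity(Id=oai_id, IfMatch=etag)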
returned: when initially created type: str - -''' +""" import datetime try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by imported AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class CloudFrontOriginAccessIdentityServiceManager(object): @@ -136,35 +134,31 @@ class CloudFrontOriginAccessIdentityServiceManager(object): def __init__(self, module): self.module = module - self.client = module.client('cloudfront') + self.client = module.client("cloudfront") def create_origin_access_identity(self, caller_reference, comment): try: return self.client.create_cloud_front_origin_access_identity( - CloudFrontOriginAccessIdentityConfig={ - 'CallerReference': caller_reference, - 'Comment': comment - } + CloudFrontOriginAccessIdentityConfig={"CallerReference": caller_reference, "Comment": comment} ) except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error creating cloud front origin access identity.") def delete_origin_access_identity(self, origin_access_identity_id, e_tag): try: - return self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag) + result = self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag) + return result, True except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.") + self.module.fail_json_aws(e, msg="Error deleting Origin Access Identity.") def update_origin_access_identity(self, caller_reference, comment, origin_access_identity_id, e_tag): changed = False - new_config = { - 'CallerReference': caller_reference, - 'Comment': comment - } + new_config = {"CallerReference": caller_reference, "Comment": comment} try: - current_config = self.client.get_cloud_front_origin_access_identity_config( - Id=origin_access_identity_id)['CloudFrontOriginAccessIdentityConfig'] + current_config = self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id)[ + "CloudFrontOriginAccessIdentityConfig" + ] except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error getting Origin Access Identity config.") @@ -194,38 +188,54 @@ class CloudFrontOriginAccessIdentityValidationManager(object): self.module = module self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) - def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id): + def describe_origin_access_identity(self, origin_access_identity_id, fail_if_missing=True): try: - if origin_access_identity_id is None: - return - oai = self.__cloudfront_facts_mgr.get_origin_access_identity(origin_access_identity_id) - if oai is not None: - return oai.get('ETag') - except (ClientError, BotoCoreError) as e: + return self.__cloudfront_facts_mgr.get_origin_access_identity( + id=origin_access_identity_id, fail_if_error=False + ) + except is_boto3_error_code("NoSuchCloudFrontOriginAccessIdentity") as e: # 
pylint: disable=duplicate-except + if fail_if_missing: + self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.") + return {} + except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.") - def validate_origin_access_identity_id_from_caller_reference( - self, caller_reference): - try: - origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities() - origin_origin_access_identity_ids = [oai.get('Id') for oai in origin_access_identities] - for origin_access_identity_id in origin_origin_access_identity_ids: - oai_config = (self.__cloudfront_facts_mgr.get_origin_access_identity_config(origin_access_identity_id)) - temp_caller_reference = oai_config.get('CloudFrontOriginAccessIdentityConfig').get('CallerReference') - if temp_caller_reference == caller_reference: - return origin_access_identity_id - except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting Origin Access Identity from caller_reference.") + def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id, fail_if_missing): + oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing) + if oai is not None: + return oai.get("ETag") + + def validate_origin_access_identity_id_from_caller_reference(self, caller_reference): + origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities() + origin_origin_access_identity_ids = [oai.get("Id") for oai in origin_access_identities] + for origin_access_identity_id in origin_origin_access_identity_ids: + oai_config = self.__cloudfront_facts_mgr.get_origin_access_identity_config(id=origin_access_identity_id) + temp_caller_reference = oai_config.get("CloudFrontOriginAccessIdentityConfig").get("CallerReference") + if temp_caller_reference == caller_reference: + return origin_access_identity_id def validate_comment(self, comment): if comment is None: - return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') + return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime( + "%Y-%m-%dT%H:%M:%S.%f" + ) return comment + def validate_caller_reference_from_origin_access_identity_id(self, origin_access_identity_id, caller_reference): + if caller_reference is None: + if origin_access_identity_id is None: + return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f") + oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing=True) + origin_access_config = oai.get("CloudFrontOriginAccessIdentity", {}).get( + "CloudFrontOriginAccessIdentityConfig", {} + ) + return origin_access_config.get("CallerReference") + return caller_reference + def main(): argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), + state=dict(choices=["present", "absent"], default="present"), origin_access_identity_id=dict(), caller_reference=dict(), comment=dict(), @@ -239,32 +249,41 @@ def main(): service_mgr = CloudFrontOriginAccessIdentityServiceManager(module) validation_mgr = CloudFrontOriginAccessIdentityValidationManager(module) - state = module.params.get('state') - caller_reference = module.params.get('caller_reference') + state = module.params.get("state") + caller_reference = module.params.get("caller_reference") - comment = module.params.get('comment') - origin_access_identity_id = 
module.params.get('origin_access_identity_id') + comment = module.params.get("comment") + origin_access_identity_id = module.params.get("origin_access_identity_id") if origin_access_identity_id is None and caller_reference is not None: - origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference(caller_reference) - - e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id) - comment = validation_mgr.validate_comment(comment) - - if state == 'present': - if origin_access_identity_id is not None and e_tag is not None: - result, changed = service_mgr.update_origin_access_identity(caller_reference, comment, origin_access_identity_id, e_tag) + origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference( + caller_reference + ) + + if state == "present": + comment = validation_mgr.validate_comment(comment) + caller_reference = validation_mgr.validate_caller_reference_from_origin_access_identity_id( + origin_access_identity_id, caller_reference + ) + if origin_access_identity_id is not None: + e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id, True) + # update cloudfront origin access identity + result, changed = service_mgr.update_origin_access_identity( + caller_reference, comment, origin_access_identity_id, e_tag + ) else: + # create cloudfront origin access identity result = service_mgr.create_origin_access_identity(caller_reference, comment) changed = True - elif state == 'absent' and origin_access_identity_id is not None and e_tag is not None: - result = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag) - changed = True + else: + e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id, False) + if e_tag: + result, changed = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag) - result.pop('ResponseMetadata', None) + result.pop("ResponseMetadata", None) module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py b/ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py index 01b38a3bd..a7558e8a8 100644 --- a/ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py +++ b/ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- version_added: 3.2.0 module: cloudfront_response_headers_policy @@ -14,16 +12,11 @@ module: cloudfront_response_headers_policy short_description: Create, update and delete response headers policies to be used in a Cloudfront distribution description: - - Create, update and delete response headers policies to be used in a Cloudfront distribution for inserting custom headers - - See docs at U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_response_headers_policy) - -author: Stefan Horning (@stefanhorning) - -extends_documentation_fragment: -- amazon.aws.aws -- 
amazon.aws.ec2 -- amazon.aws.boto3 + - Create, update and delete response headers policies to be used in a Cloudfront distribution for inserting custom headers + - See docs at U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_response_headers_policy) +author: + - Stefan Horning (@stefanhorning) options: state: @@ -57,9 +50,13 @@ options: default: {} type: dict -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Creating a Cloudfront header policy using all predefined header features and a custom header for demonstration community.aws.cloudfront_response_headers_policy: name: my-header-policy @@ -113,9 +110,9 @@ EXAMPLES = ''' community.aws.cloudfront_response_headers_policy: name: my-header-policy state: absent -''' +""" -RETURN = ''' +RETURN = r""" response_headers_policy: description: The policy's information returned: success @@ -141,40 +138,43 @@ response_headers_policy: type: str returned: always sample: my-header-policy -''' +""" + +import datetime try: - from botocore.exceptions import ClientError, ParamValidationError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by imported AnsibleAWSModule -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -import datetime +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -class CloudfrontResponseHeadersPolicyService(object): +class CloudfrontResponseHeadersPolicyService(object): def __init__(self, module): self.module = module - self.client = module.client('cloudfront') + self.client = module.client("cloudfront") self.check_mode = module.check_mode def find_response_headers_policy(self, name): try: - policies = self.client.list_response_headers_policies()['ResponseHeadersPolicyList']['Items'] + policies = self.client.list_response_headers_policies()["ResponseHeadersPolicyList"]["Items"] for policy in policies: - if policy['ResponseHeadersPolicy']['ResponseHeadersPolicyConfig']['Name'] == name: - policy_id = policy['ResponseHeadersPolicy']['Id'] + if policy["ResponseHeadersPolicy"]["ResponseHeadersPolicyConfig"]["Name"] == name: + policy_id = policy["ResponseHeadersPolicy"]["Id"] # as the list_ request does not contain the Etag (which we need), we need to do another get_ request here - matching_policy = self.client.get_response_headers_policy(Id=policy['ResponseHeadersPolicy']['Id']) + matching_policy = self.client.get_response_headers_policy(Id=policy["ResponseHeadersPolicy"]["Id"]) break else: matching_policy = None return matching_policy - except (ParamValidationError, ClientError, BotoCoreError) as e: + except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error fetching policy information") def create_response_header_policy(self, name, comment, cors_config, security_headers_config, custom_headers_config): @@ -182,17 +182,17 @@ class CloudfrontResponseHeadersPolicyService(object): security_headers_config = snake_dict_to_camel_dict(security_headers_config,
capitalize_first=True) # Little helper for turning xss_protection into XSSProtection and not into XssProtection - if 'XssProtection' in security_headers_config: - security_headers_config['XSSProtection'] = security_headers_config.pop('XssProtection') + if "XssProtection" in security_headers_config: + security_headers_config["XSSProtection"] = security_headers_config.pop("XssProtection") custom_headers_config = snake_dict_to_camel_dict(custom_headers_config, capitalize_first=True) config = { - 'Name': name, - 'Comment': comment, - 'CorsConfig': self.insert_quantities(cors_config), - 'SecurityHeadersConfig': security_headers_config, - 'CustomHeadersConfig': self.insert_quantities(custom_headers_config) + "Name": name, + "Comment": comment, + "CorsConfig": self.insert_quantities(cors_config), + "SecurityHeadersConfig": security_headers_config, + "CustomHeadersConfig": self.insert_quantities(custom_headers_config), } config = {k: v for k, v in config.items() if v} @@ -208,22 +208,24 @@ class CloudfrontResponseHeadersPolicyService(object): try: result = self.client.create_response_headers_policy(ResponseHeadersPolicyConfig=config) changed = True - except (ParamValidationError, ClientError, BotoCoreError) as e: + except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error creating policy") else: - policy_id = matching_policy['ResponseHeadersPolicy']['Id'] - etag = matching_policy['ETag'] + policy_id = matching_policy["ResponseHeadersPolicy"]["Id"] + etag = matching_policy["ETag"] try: - result = self.client.update_response_headers_policy(Id=policy_id, IfMatch=etag, ResponseHeadersPolicyConfig=config) + result = self.client.update_response_headers_policy( + Id=policy_id, IfMatch=etag, ResponseHeadersPolicyConfig=config + ) - changed_time = result['ResponseHeadersPolicy']['LastModifiedTime'] + changed_time = result["ResponseHeadersPolicy"]["LastModifiedTime"] seconds = 3 # threshold for returned timestamp age - seconds_ago = (datetime.datetime.now(changed_time.tzinfo) - datetime.timedelta(0, seconds)) + seconds_ago = datetime.datetime.now(changed_time.tzinfo) - datetime.timedelta(0, seconds) # consider change made by this execution of the module if returned timestamp was very recent if changed_time > seconds_ago: changed = True - except (ParamValidationError, ClientError, BotoCoreError) as e: + except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error updating policy") self.module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) @@ -234,14 +236,14 @@ class CloudfrontResponseHeadersPolicyService(object): if matching_policy is None: self.module.exit_json(msg="Didn't find a matching policy by that name, not deleting") else: - policy_id = matching_policy['ResponseHeadersPolicy']['Id'] - etag = matching_policy['ETag'] + policy_id = matching_policy["ResponseHeadersPolicy"]["Id"] + etag = matching_policy["ETag"] if self.check_mode: result = {} else: try: result = self.client.delete_response_headers_policy(Id=policy_id, IfMatch=etag) - except (ParamValidationError, ClientError, BotoCoreError) as e: + except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error deleting policy") self.module.exit_json(changed=True, **camel_dict_to_snake_dict(result)) @@ -250,43 +252,45 @@ class CloudfrontResponseHeadersPolicyService(object): @staticmethod def insert_quantities(dict_with_items): # Items on top level case - if 'Items' in dict_with_items and isinstance(dict_with_items['Items'], list): - dict_with_items['Quantity'] =
len(dict_with_items['Items']) + if "Items" in dict_with_items and isinstance(dict_with_items["Items"], list): + dict_with_items["Quantity"] = len(dict_with_items["Items"]) # Items on second level case for k, v in dict_with_items.items(): - if isinstance(v, dict) and 'Items' in v: - v['Quantity'] = len(v['Items']) + if isinstance(v, dict) and "Items" in v: + v["Quantity"] = len(v["Items"]) return dict_with_items def main(): argument_spec = dict( - name=dict(required=True, type='str'), - comment=dict(type='str'), - cors_config=dict(type='dict', default=dict()), - security_headers_config=dict(type='dict', default=dict()), - custom_headers_config=dict(type='dict', default=dict()), - state=dict(choices=['present', 'absent'], type='str', default='present'), + name=dict(required=True, type="str"), + comment=dict(type="str"), + cors_config=dict(type="dict", default=dict()), + security_headers_config=dict(type="dict", default=dict()), + custom_headers_config=dict(type="dict", default=dict()), + state=dict(choices=["present", "absent"], type="str", default="present"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - name = module.params.get('name') - comment = module.params.get('comment', '') - cors_config = module.params.get('cors_config') - security_headers_config = module.params.get('security_headers_config') - custom_headers_config = module.params.get('custom_headers_config') - state = module.params.get('state') + name = module.params.get("name") + comment = module.params.get("comment", "") + cors_config = module.params.get("cors_config") + security_headers_config = module.params.get("security_headers_config") + custom_headers_config = module.params.get("custom_headers_config") + state = module.params.get("state") service = CloudfrontResponseHeadersPolicyService(module) - if state == 'absent': + if state == "absent": service.delete_response_header_policy(name) else: - service.create_response_header_policy(name, comment, cors_config, security_headers_config, custom_headers_config) + service.create_response_header_policy( + name, comment, cors_config, security_headers_config, custom_headers_config + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/codebuild_project.py b/ansible_collections/community/aws/plugins/modules/codebuild_project.py index 873b74010..1f4630f73 100644 --- a/ansible_collections/community/aws/plugins/modules/codebuild_project.py +++ b/ansible_collections/community/aws/plugins/modules/codebuild_project.py @@ -1,19 +1,17 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: codebuild_project version_added: 1.0.0 short_description: Create or delete an AWS CodeBuild project notes: - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html). + - I(tags) changed from boto3 format to standard dict format in release 6.0.0. description: - Create or delete a CodeBuild project on AWS, used for building code artifacts from source code. - Prior to release 5.0.0 this module was called C(community.aws.aws_codebuild).
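# A self-contained sketch of the tag-format change called out in the notes above
# ("tags changed from boto3 format to standard dict format"). The real conversions
# use the ansible_dict_to_boto3_tag_list / boto3_tag_list_to_ansible_dict helpers
# imported later in this diff; the stand-ins below only illustrate the two shapes.
# Lowercase 'key'/'value' is assumed here for CodeBuild (most AWS APIs use
# 'Key'/'Value', and the helpers take configurable key names for that reason).
def to_boto3_tag_list(tags):
    # {'env': 'prod'} -> [{'key': 'env', 'value': 'prod'}]
    return [{"key": k, "value": v} for k, v in sorted(tags.items())]

def to_ansible_tag_dict(tag_list):
    # [{'key': 'env', 'value': 'prod'}] -> {'env': 'prod'}
    return {t["key"]: t["value"] for t in tag_list}

assert to_ansible_tag_dict(to_boto3_tag_list({"env": "prod"})) == {"env": "prod"}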
@@ -137,23 +135,6 @@ options: description: - The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts. type: str - tags: - description: - - A set of tags for the build project. - - Mutually exclusive with the I(resource_tags) parameter. - - In release 6.0.0 this parameter will accept a simple dictionary - instead of the list of dictionaries format. To use the simple - dictionary format prior to release 6.0.0 the I(resource_tags) can - be used instead of I(tags). - type: list - elements: dict - suboptions: - key: - description: The name of the Tag. - type: str - value: - description: The value of the Tag. - type: str vpc_config: description: - The VPC config enables AWS CodeBuild to access resources in an Amazon VPC. @@ -164,35 +145,15 @@ options: default: 'present' choices: ['present', 'absent'] type: str - resource_tags: - description: - - A dictionary representing the tags to be applied to the build project. - - If the I(resource_tags) parameter is not set then tags will not be modified. - - Mutually exclusive with the I(tags) parameter. - type: dict - required: false - purge_tags: - description: - - If I(purge_tags=true) and I(tags) is set, existing tags will be purged - from the resource to match exactly what is defined by I(tags) parameter. - - If the I(resource_tags) parameter is not set then tags will not be modified, even - if I(purge_tags=True). - - Tag keys beginning with C(aws:) are reserved by Amazon and can not be - modified. As such they will be ignored for the purposes of the - I(purge_tags) parameter. See the Amazon documentation for more information - U(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions). - type: bool - default: true - required: false extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.boto3.modules + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags.modules +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - community.aws.codebuild_project: @@ -200,27 +161,28 @@ EXAMPLES = r''' description: My nice little project service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role" source: - # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3 - type: CODEPIPELINE - buildspec: '' + # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3 + type: CODEPIPELINE + buildspec: '' artifacts: - namespaceType: NONE - packaging: NONE - type: CODEPIPELINE - name: my_project + namespaceType: NONE + packaging: NONE + type: CODEPIPELINE + name: my_project environment: - computeType: BUILD_GENERAL1_SMALL - privilegedMode: "true" - image: "aws/codebuild/docker:17.09.0" - type: LINUX_CONTAINER - environmentVariables: - - { name: 'PROFILE', value: 'staging' } + computeType: BUILD_GENERAL1_SMALL + privilegedMode: "true" + image: "aws/codebuild/docker:17.09.0" + type: LINUX_CONTAINER + environmentVariables: + - name: 'PROFILE' + value: 'staging' encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3" region: us-east-1 state: present -''' +""" -RETURN = r''' +RETURN = r""" project: description: Returns the dictionary describing the code project configuration. 
returned: success @@ -324,118 +286,162 @@ project: returned: always type: str sample: "2018-04-17T16:56:03.245000+02:00" -''' +""" + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule +class CodeBuildAnsibleAWSError(AnsibleAWSError): + pass -def create_or_update_project(client, params, module): - resp = {} - name = params['name'] - # clean up params - formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None)) - permitted_create_params = get_boto3_client_method_parameters(client, 'create_project') - permitted_update_params = get_boto3_client_method_parameters(client, 'update_project') - formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params) - formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params) +def do_create_project(client, params, formatted_params): + if params["source"] is None or params["artifacts"] is None: + raise CodeBuildAnsibleAWSError( + message="The source and artifacts parameters must be provided when creating a new project. No existing project was found." 
+ ) - # Check if project with that name already exists and if so update existing: - found = describe_project(client=client, name=name, module=module) - changed = False + if params["tags"] is not None: + formatted_params["tags"] = ansible_dict_to_boto3_tag_list( + params["tags"], tag_name_key_name="key", tag_value_key_name="value" + ) + + permitted_create_params = get_boto3_client_method_parameters(client, "create_project") + formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params) - if 'name' in found: - found_project = found - found_tags = found_project.pop('tags', []) - # Support tagging using a dict instead of the list of dicts - if params['resource_tags'] is not None: - if params['purge_tags']: - tags = dict() - else: - tags = boto3_tag_list_to_ansible_dict(found_tags) - tags.update(params['resource_tags']) - formatted_update_params['tags'] = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') - - resp = update_project(client=client, params=formatted_update_params, module=module) - updated_project = resp['project'] - - # Prep both dicts for sensible change comparison: - found_project.pop('lastModified') - updated_project.pop('lastModified') - updated_tags = updated_project.pop('tags', []) - found_project['ResourceTags'] = boto3_tag_list_to_ansible_dict(found_tags) - updated_project['ResourceTags'] = boto3_tag_list_to_ansible_dict(updated_tags) - - if updated_project != found_project: - changed = True - updated_project['tags'] = updated_tags - return resp, changed # Or create new project: try: - if params['source'] is None or params['artifacts'] is None: - module.fail_json( - "The source and artifacts parameters must be provided when " - "creating a new project. 
No existing project was found.")
        resp = client.create_project(**formatted_create_params)
        changed = True
        return resp
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Unable to create CodeBuild project")
+        raise CodeBuildAnsibleAWSError(
+            message="Unable to create CodeBuild project",
+            exception=e,
+        )
+
+
+def merge_tags(found_tags, tags, purge_tags):
+    if purge_tags:
+        return tags
+
+    merged_tags = boto3_tag_list_to_ansible_dict(found_tags)
+    merged_tags.update(tags)
+    return merged_tags
+
+
+def format_tags(tags):
+    return ansible_dict_to_boto3_tag_list(
+        tags,
+        tag_name_key_name="key",
+        tag_value_key_name="value",
+    )
+
+
+def do_update_project(client, params, formatted_params, found_project):
+    permitted_update_params = get_boto3_client_method_parameters(client, "update_project")
+    formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params)
+
+    found_tags = found_project.pop("tags", [])
+    if params["tags"] is not None:
+        formatted_update_params["tags"] = format_tags(
+            merge_tags(found_tags, params["tags"], params["purge_tags"]),
+        )
+
+    resp = update_project(client=client, params=formatted_update_params)
+    updated_project = resp["project"]
+
+    # Prep both dicts for sensible change comparison:
+    found_project.pop("lastModified")
+    updated_project.pop("lastModified")
+    updated_tags = updated_project.pop("tags", [])
+    found_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(found_tags)
+    updated_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(updated_tags)
+
+    changed = updated_project != found_project
+
+    updated_project["tags"] = updated_tags
+    return resp, changed


-def update_project(client, params, module):
-    name = params['name']
+def create_or_update_project(client, params):
+    resp = {}
+    name = params["name"]
+    # clean up params
+    formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None))
+
+    # Check if project with that name already exists and if so update existing:
+    found = describe_project(client=client, name=name)
+    changed = False
+
+    if "name" not in found:
+        return do_create_project(client, params, formatted_params)
+
+    return do_update_project(client, params, formatted_params, found)
+
+
+def update_project(client, params):
+    name = params["name"]
    try:
        resp = client.update_project(**params)
        return resp
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Unable to update CodeBuild project")
+        raise CodeBuildAnsibleAWSError(
+            message="Unable to update CodeBuild project",
+            exception=e,
+        )


-def delete_project(client, name, module):
-    found = describe_project(client=client, name=name, module=module)
-    changed = False
-    if 'name' in found:
-        # Mark as changed when a project with that name existed before calling delete
-        changed = True
+def delete_project(client, name):
+    found = describe_project(client=client, name=name)
+    if "name" not in found:
+        return {}, False
+
    try:
        resp = client.delete_project(name=name)
-        return resp, changed
+        return resp, True
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Unable to delete CodeBuild project")
+        raise CodeBuildAnsibleAWSError(
+            message="Unable to delete CodeBuild project",
+            exception=e,
+        )


-def describe_project(client, name, module):
+def describe_project(client, name):
    project = {}
    try:
-        projects =
client.batch_get_projects(names=[name])['projects'] + projects = client.batch_get_projects(names=[name])["projects"] if len(projects) > 0: project = projects[0] return project except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe CodeBuild projects") + raise CodeBuildAnsibleAWSError( + message="Unable to describe CodeBuild projects", + exception=e, + ) def format_project_result(project_result): formated_result = camel_dict_to_snake_dict(project_result) - project = project_result.get('project', {}) + project = project_result.get("project", {}) if project: - tags = project.get('tags', []) - formated_result['project']['resource_tags'] = boto3_tag_list_to_ansible_dict(tags) - formated_result['ORIGINAL'] = project_result + tags = project.get("tags", []) + formated_result["project"]["resource_tags"] = boto3_tag_list_to_ansible_dict(tags) + formated_result["ORIGINAL"] = project_result return formated_result @@ -443,46 +449,44 @@ def main(): argument_spec = dict( name=dict(required=True), description=dict(), - source=dict(type='dict'), - artifacts=dict(type='dict'), - cache=dict(type='dict'), - environment=dict(type='dict'), + source=dict(type="dict"), + artifacts=dict(type="dict"), + cache=dict(type="dict"), + environment=dict(type="dict"), service_role=dict(), - timeout_in_minutes=dict(type='int', default=60), + timeout_in_minutes=dict(type="int", default=60), encryption_key=dict(no_log=False), - tags=dict(type='list', elements='dict'), - resource_tags=dict(type='dict'), - purge_tags=dict(type='bool', default=True), - vpc_config=dict(type='dict'), - state=dict(choices=['present', 'absent'], default='present') + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + vpc_config=dict(type="dict"), + state=dict(choices=["present", "absent"], default="present"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - client_conn = module.client('codebuild') + client_conn = module.client("codebuild") - state = module.params.get('state') + state = module.params.get("state") changed = False - if module.params['tags']: - module.deprecate( - 'The tags parameter currently uses a non-standard format and has ' - 'been deprecated. In release 6.0.0 this paramater will accept ' - 'a simple key/value pair dictionary instead of the current list ' - 'of dictionaries. 
It is recommended to migrate to using the ' - 'resource_tags parameter which already accepts the simple dictionary ' - 'format.', version='6.0.0', collection_name='community.aws') - - if state == 'present': - project_result, changed = create_or_update_project( - client=client_conn, - params=module.params, - module=module) - elif state == 'absent': - project_result, changed = delete_project(client=client_conn, name=module.params['name'], module=module) + try: + if state == "present": + project_result, changed = create_or_update_project( + client=client_conn, + params=module.params, + ) + elif state == "absent": + project_result, changed = delete_project( + client=client_conn, + name=module.params["name"], + ) + except CodeBuildAnsibleAWSError as e: + if e.exception: + module.fail_json_aws(e.exception, msg=e.message) + module.fail_json(msg=e.message) formatted_result = format_project_result(project_result) module.exit_json(changed=changed, **formatted_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/codecommit_repository.py b/ansible_collections/community/aws/plugins/modules/codecommit_repository.py index fce4d15d6..14b08bd88 100644 --- a/ansible_collections/community/aws/plugins/modules/codecommit_repository.py +++ b/ansible_collections/community/aws/plugins/modules/codecommit_repository.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: (c) 2018, Shuang Wang <ooocamel@icloud.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: codecommit_repository version_added: 1.0.0 @@ -17,7 +14,8 @@ description: - See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit. - Prior to release 5.0.0 this module was called C(community.aws.aws_codecommit). The usage did not change. -author: Shuang Wang (@ptux) +author: + - Shuang Wang (@ptux) options: name: description: @@ -39,12 +37,12 @@ options: choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -RETURN = ''' +RETURN = r""" repository_metadata: description: "Information about the repository." 
returned: always @@ -120,9 +118,9 @@ response_metadata: returned: always type: str sample: "0" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create a new repository - community.aws.codecommit_repository: name: repo @@ -132,53 +130,54 @@ EXAMPLES = ''' - community.aws.codecommit_repository: name: repo state: absent -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class CodeCommit(object): def __init__(self, module=None): self._module = module - self._client = self._module.client('codecommit') + self._client = self._module.client("codecommit") self._check_mode = self._module.check_mode def process(self): result = dict(changed=False) - if self._module.params['state'] == 'present': + if self._module.params["state"] == "present": if not self._repository_exists(): if not self._check_mode: result = self._create_repository() - result['changed'] = True + result["changed"] = True else: - metadata = self._get_repository()['repositoryMetadata'] - if not metadata.get('repositoryDescription'): - metadata['repositoryDescription'] = '' - if metadata['repositoryDescription'] != self._module.params['description']: + metadata = self._get_repository()["repositoryMetadata"] + if not metadata.get("repositoryDescription"): + metadata["repositoryDescription"] = "" + if metadata["repositoryDescription"] != self._module.params["description"]: if not self._check_mode: self._update_repository() - result['changed'] = True + result["changed"] = True result.update(self._get_repository()) - if self._module.params['state'] == 'absent' and self._repository_exists(): + if self._module.params["state"] == "absent" and self._repository_exists(): if not self._check_mode: result = self._delete_repository() - result['changed'] = True + result["changed"] = True return result def _repository_exists(self): try: - paginator = self._client.get_paginator('list_repositories') + paginator = self._client.get_paginator("list_repositories") for page in paginator.paginate(): - repositories = page['repositories'] + repositories = page["repositories"] for item in repositories: - if self._module.params['name'] in item.values(): + if self._module.params["name"] in item.values(): return True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't get repository") @@ -187,7 +186,7 @@ class CodeCommit(object): def _get_repository(self): try: result = self._client.get_repository( - repositoryName=self._module.params['name'] + repositoryName=self._module.params["name"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't get repository") @@ -196,8 +195,8 @@ class CodeCommit(object): def _update_repository(self): try: result = self._client.update_repository_description( - repositoryName=self._module.params['name'], - repositoryDescription=self._module.params['description'] + repositoryName=self._module.params["name"], + repositoryDescription=self._module.params["description"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, 
msg="couldn't create repository") @@ -206,8 +205,8 @@ class CodeCommit(object): def _create_repository(self): try: result = self._client.create_repository( - repositoryName=self._module.params['name'], - repositoryDescription=self._module.params['description'] + repositoryName=self._module.params["name"], + repositoryDescription=self._module.params["description"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't create repository") @@ -216,7 +215,7 @@ class CodeCommit(object): def _delete_repository(self): try: result = self._client.delete_repository( - repositoryName=self._module.params['name'] + repositoryName=self._module.params["name"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't delete repository") @@ -226,13 +225,13 @@ class CodeCommit(object): def main(): argument_spec = dict( name=dict(required=True), - state=dict(choices=['present', 'absent'], required=True), - description=dict(default='', aliases=['comment']) + state=dict(choices=["present", "absent"], required=True), + description=dict(default="", aliases=["comment"]), ) ansible_aws_module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) aws_codecommit = CodeCommit(module=ansible_aws_module) @@ -240,5 +239,5 @@ def main(): ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/codepipeline.py b/ansible_collections/community/aws/plugins/modules/codepipeline.py index 5c5935cb9..b1fe60476 100644 --- a/ansible_collections/community/aws/plugins/modules/codepipeline.py +++ b/ansible_collections/community/aws/plugins/modules/codepipeline.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: codepipeline version_added: 1.0.0 @@ -75,16 +72,16 @@ options: choices: ['present', 'absent'] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container) -- community.aws.aws_codepipeline: +- community.aws.codepipeline: name: my_deploy_pipeline role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service artifact_store: @@ -147,9 +144,9 @@ EXAMPLES = r''' FileName: imagedefinitions.json region: us-east-1 state: present -''' +""" -RETURN = r''' +RETURN = r""" pipeline: description: Returns the dictionary describing the CodePipeline configuration. returned: success @@ -194,7 +191,7 @@ pipeline: - This number is auto incremented when CodePipeline params are changed. 
      returned: always
      type: int
-'''
+"""


import copy

@@ -205,20 +202,21 @@ except ImportError:

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule


def create_pipeline(client, name, role_arn, artifact_store, stages, version, module):
-    pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages}
+    pipeline_dict = {"name": name, "roleArn": role_arn, "artifactStore": artifact_store, "stages": stages}
    if version:
-        pipeline_dict['version'] = version
+        pipeline_dict["version"] = version
    try:
        resp = client.create_pipeline(pipeline=pipeline_dict)
        return resp
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Unable create pipeline {0}".format(pipeline_dict['name']))
+        module.fail_json_aws(e, msg=f"Unable to create pipeline {pipeline_dict['name']}")


def update_pipeline(client, pipeline_dict, module):
@@ -226,7 +224,7 @@
        resp = client.update_pipeline(pipeline=pipeline_dict)
        return resp
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Unable update pipeline {0}".format(pipeline_dict['name']))
+        module.fail_json_aws(e, msg=f"Unable to update pipeline {pipeline_dict['name']}")


def delete_pipeline(client, name, module):
@@ -234,7 +232,7 @@
        resp = client.delete_pipeline(name=name)
        return resp
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Unable delete pipeline {0}".format(name))
+        module.fail_json_aws(e, msg=f"Unable to delete pipeline {name}")


def describe_pipeline(client, name, version, module):
@@ -246,63 +244,69 @@
        else:
            pipeline = client.get_pipeline(name=name)
        return pipeline
-    except is_boto3_error_code('PipelineNotFoundException'):
+    except is_boto3_error_code("PipelineNotFoundException"):
        return pipeline
-    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+    except (
+        botocore.exceptions.BotoCoreError,
+        botocore.exceptions.ClientError,
+    ) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)


def main():
    argument_spec = dict(
-        name=dict(required=True, type='str'),
-        role_arn=dict(required=True, type='str'),
-        artifact_store=dict(required=True, type='dict'),
-        stages=dict(required=True, type='list', elements='dict'),
-        version=dict(type='int'),
-        state=dict(choices=['present', 'absent'], default='present')
+        name=dict(required=True, type="str"),
+        role_arn=dict(required=True, type="str"),
+        artifact_store=dict(required=True, type="dict"),
+        stages=dict(required=True, type="list", elements="dict"),
+        version=dict(type="int"),
+        state=dict(choices=["present", "absent"], default="present"),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec)
-    client_conn =
module.client('codepipeline') + client_conn = module.client("codepipeline") - state = module.params.get('state') + state = module.params.get("state") changed = False # Determine if the CodePipeline exists - found_code_pipeline = describe_pipeline(client=client_conn, name=module.params['name'], version=module.params['version'], module=module) + found_code_pipeline = describe_pipeline( + client=client_conn, name=module.params["name"], version=module.params["version"], module=module + ) pipeline_result = {} - if state == 'present': - if 'pipeline' in found_code_pipeline: - pipeline_dict = copy.deepcopy(found_code_pipeline['pipeline']) + if state == "present": + if "pipeline" in found_code_pipeline: + pipeline_dict = copy.deepcopy(found_code_pipeline["pipeline"]) # Update dictionary with provided module params: - pipeline_dict['roleArn'] = module.params['role_arn'] - pipeline_dict['artifactStore'] = module.params['artifact_store'] - pipeline_dict['stages'] = module.params['stages'] - if module.params['version'] is not None: - pipeline_dict['version'] = module.params['version'] + pipeline_dict["roleArn"] = module.params["role_arn"] + pipeline_dict["artifactStore"] = module.params["artifact_store"] + pipeline_dict["stages"] = module.params["stages"] + if module.params["version"] is not None: + pipeline_dict["version"] = module.params["version"] pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module) - if compare_policies(found_code_pipeline['pipeline'], pipeline_result['pipeline']): + if compare_policies(found_code_pipeline["pipeline"], pipeline_result["pipeline"]): changed = True else: pipeline_result = create_pipeline( client=client_conn, - name=module.params['name'], - role_arn=module.params['role_arn'], - artifact_store=module.params['artifact_store'], - stages=module.params['stages'], - version=module.params['version'], - module=module) + name=module.params["name"], + role_arn=module.params["role_arn"], + artifact_store=module.params["artifact_store"], + stages=module.params["stages"], + version=module.params["version"], + module=module, + ) changed = True - elif state == 'absent': + elif state == "absent": if found_code_pipeline: - pipeline_result = delete_pipeline(client=client_conn, name=module.params['name'], module=module) + pipeline_result = delete_pipeline(client=client_conn, name=module.params["name"], module=module) changed = True module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py b/ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py index 7b92abb7f..903d5a5e1 100644 --- a/ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py +++ b/ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: config_aggregation_authorization version_added: 1.0.0 @@ -36,12 +33,12 @@ options: type: str required: true extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - 
amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get current account ID community.aws.aws_caller_info: register: whoami @@ -49,26 +46,26 @@ EXAMPLES = ''' state: present authorized_account_id: '{{ whoami.account }}' authorized_aws_region: us-east-1 -''' - -RETURN = '''#''' +""" +RETURN = r"""#""" try: import botocore except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def resource_exists(client, module, params): try: - current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations'] + current_authorizations = client.describe_aggregation_authorizations()["AggregationAuthorizations"] authorization_exists = next( - (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']), - None + (item for item in current_authorizations if item["AuthorizedAccountId"] == params["AuthorizedAccountId"]), + None, ) if authorization_exists: return True @@ -79,32 +76,32 @@ def resource_exists(client, module, params): def create_resource(client, module, params, result): try: response = client.put_aggregation_authorization( - AuthorizedAccountId=params['AuthorizedAccountId'], - AuthorizedAwsRegion=params['AuthorizedAwsRegion'] + AuthorizedAccountId=params["AuthorizedAccountId"], + AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) - result['changed'] = True + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization") def update_resource(client, module, params, result): - current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations'] + current_authorizations = client.describe_aggregation_authorizations()["AggregationAuthorizations"] current_params = next( - (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']), - None + (item for item in current_authorizations if item["AuthorizedAccountId"] == params["AuthorizedAccountId"]), + None, ) - del current_params['AggregationAuthorizationArn'] - del current_params['CreationTime'] + del current_params["AggregationAuthorizationArn"] + del current_params["CreationTime"] if params != current_params: try: response = client.put_aggregation_authorization( - AuthorizedAccountId=params['AuthorizedAccountId'], - AuthorizedAwsRegion=params['AuthorizedAwsRegion'] + AuthorizedAccountId=params["AuthorizedAccountId"], + AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) - result['changed'] = True + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization") @@ -113,10 +110,10 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: response = client.delete_aggregation_authorization( - AuthorizedAccountId=params['AuthorizedAccountId'], - AuthorizedAwsRegion=params['AuthorizedAwsRegion'] + AuthorizedAccountId=params["AuthorizedAccountId"], + 
AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) - result['changed'] = True + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization") @@ -125,35 +122,35 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'authorized_account_id': dict(type='str', required=True), - 'authorized_aws_region': dict(type='str', required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "authorized_account_id": dict(type="str", required=True), + "authorized_aws_region": dict(type="str", required=True), }, supports_check_mode=False, ) - result = {'changed': False} + result = {"changed": False} params = { - 'AuthorizedAccountId': module.params.get('authorized_account_id'), - 'AuthorizedAwsRegion': module.params.get('authorized_aws_region'), + "AuthorizedAccountId": module.params.get("authorized_account_id"), + "AuthorizedAwsRegion": module.params.get("authorized_aws_region"), } - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if module.params.get('state') == 'present': + if module.params.get("state") == "present": if not resource_status: create_resource(client, module, params, result) else: update_resource(client, module, params, result) - if module.params.get('state') == 'absent': + if module.params.get("state") == "absent": if resource_status: delete_resource(client, module, params, result) - module.exit_json(changed=result['changed']) + module.exit_json(changed=result["changed"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/config_aggregator.py b/ansible_collections/community/aws/plugins/modules/config_aggregator.py index 3dc4c6faa..48771080b 100644 --- a/ansible_collections/community/aws/plugins/modules/config_aggregator.py +++ b/ansible_collections/community/aws/plugins/modules/config_aggregator.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: config_aggregator version_added: 1.0.0 @@ -71,25 +68,25 @@ options: type: dict required: true extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create cross-account aggregator community.aws.config_aggregator: name: test_config_rule state: present account_sources: account_ids: - - 1234567890 - - 0123456789 - - 9012345678 + - 1234567890 + - 0123456789 + - 9012345678 all_aws_regions: true -''' +""" -RETURN = r'''#''' +RETURN = r"""#""" try: @@ -97,57 +94,64 @@ try: except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict +from 
ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def resource_exists(client, module, params): try: aggregator = client.describe_configuration_aggregators( - ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']] + ConfigurationAggregatorNames=[params["ConfigurationAggregatorName"]] ) - return aggregator['ConfigurationAggregators'][0] - except is_boto3_error_code('NoSuchConfigurationAggregatorException'): + return aggregator["ConfigurationAggregators"][0] + except is_boto3_error_code("NoSuchConfigurationAggregatorException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) def create_resource(client, module, params, result): try: client.put_configuration_aggregator( - ConfigurationAggregatorName=params['ConfigurationAggregatorName'], - AccountAggregationSources=params['AccountAggregationSources'], - OrganizationAggregationSource=params['OrganizationAggregationSource'] + ConfigurationAggregatorName=params["ConfigurationAggregatorName"], + AccountAggregationSources=params["AccountAggregationSources"], + OrganizationAggregationSource=params["OrganizationAggregationSource"], ) - result['changed'] = True - result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + result["changed"] = True + result["aggregator"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator") def update_resource(client, module, params, result): - result['changed'] = False + result["changed"] = False current_params = client.describe_configuration_aggregators( - ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']] - )['ConfigurationAggregators'][0] + ConfigurationAggregatorNames=[params["ConfigurationAggregatorName"]] + )["ConfigurationAggregators"][0] - if params['AccountAggregationSources'] != current_params.get('AccountAggregationSources', []): - result['changed'] = True + if params["AccountAggregationSources"] != current_params.get("AccountAggregationSources", []): + result["changed"] = True - if params['OrganizationAggregationSource'] != current_params.get('OrganizationAggregationSource', {}): - result['changed'] = True + if params["OrganizationAggregationSource"] != current_params.get("OrganizationAggregationSource", {}): + result["changed"] = True - if result['changed']: + if result["changed"]: try: client.put_configuration_aggregator( - ConfigurationAggregatorName=params['ConfigurationAggregatorName'], - AccountAggregationSources=params['AccountAggregationSources'], - OrganizationAggregationSource=params['OrganizationAggregationSource'] + ConfigurationAggregatorName=params["ConfigurationAggregatorName"], + AccountAggregationSources=params["AccountAggregationSources"], + OrganizationAggregationSource=params["OrganizationAggregationSource"], ) - result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, 
module, params)) + result["aggregator"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator") @@ -155,10 +159,8 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: - client.delete_configuration_aggregator( - ConfigurationAggregatorName=params['ConfigurationAggregatorName'] - ) - result['changed'] = True + client.delete_configuration_aggregator(ConfigurationAggregatorName=params["ConfigurationAggregatorName"]) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator") @@ -167,66 +169,64 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'account_sources': dict(type='list', required=True, elements='dict'), - 'organization_source': dict(type='dict', required=True) + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "account_sources": dict(type="list", required=True, elements="dict"), + "organization_source": dict(type="dict", required=True), }, supports_check_mode=False, ) - result = { - 'changed': False - } + result = {"changed": False} - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") params = {} if name: - params['ConfigurationAggregatorName'] = name - params['AccountAggregationSources'] = [] - if module.params.get('account_sources'): - for i in module.params.get('account_sources'): + params["ConfigurationAggregatorName"] = name + params["AccountAggregationSources"] = [] + if module.params.get("account_sources"): + for i in module.params.get("account_sources"): tmp_dict = {} - if i.get('account_ids'): - tmp_dict['AccountIds'] = i.get('account_ids') - if i.get('aws_regions'): - tmp_dict['AwsRegions'] = i.get('aws_regions') - if i.get('all_aws_regions') is not None: - tmp_dict['AllAwsRegions'] = i.get('all_aws_regions') - params['AccountAggregationSources'].append(tmp_dict) - if module.params.get('organization_source'): - params['OrganizationAggregationSource'] = {} - if module.params.get('organization_source').get('role_arn'): - params['OrganizationAggregationSource'].update({ - 'RoleArn': module.params.get('organization_source').get('role_arn') - }) - if module.params.get('organization_source').get('aws_regions'): - params['OrganizationAggregationSource'].update({ - 'AwsRegions': module.params.get('organization_source').get('aws_regions') - }) - if module.params.get('organization_source').get('all_aws_regions') is not None: - params['OrganizationAggregationSource'].update({ - 'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions') - }) - - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + if i.get("account_ids"): + tmp_dict["AccountIds"] = i.get("account_ids") + if i.get("aws_regions"): + tmp_dict["AwsRegions"] = i.get("aws_regions") + if i.get("all_aws_regions") is not None: + tmp_dict["AllAwsRegions"] = i.get("all_aws_regions") + params["AccountAggregationSources"].append(tmp_dict) + if 
module.params.get("organization_source"): + params["OrganizationAggregationSource"] = {} + if module.params.get("organization_source").get("role_arn"): + params["OrganizationAggregationSource"].update( + {"RoleArn": module.params.get("organization_source").get("role_arn")} + ) + if module.params.get("organization_source").get("aws_regions"): + params["OrganizationAggregationSource"].update( + {"AwsRegions": module.params.get("organization_source").get("aws_regions")} + ) + if module.params.get("organization_source").get("all_aws_regions") is not None: + params["OrganizationAggregationSource"].update( + {"AllAwsRegions": module.params.get("organization_source").get("all_aws_regions")} + ) + + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if state == 'present': + if state == "present": if not resource_status: create_resource(client, module, params, result) else: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if resource_status: delete_resource(client, module, params, result) - module.exit_json(changed=result['changed']) + module.exit_json(changed=result["changed"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/config_delivery_channel.py b/ansible_collections/community/aws/plugins/modules/config_delivery_channel.py index 371bd6685..1c3a3acdc 100644 --- a/ansible_collections/community/aws/plugins/modules/config_delivery_channel.py +++ b/ansible_collections/community/aws/plugins/modules/config_delivery_channel.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: config_delivery_channel version_added: 1.0.0 @@ -39,6 +36,10 @@ options: description: - The prefix for the specified Amazon S3 bucket. type: str + kms_key_arn: + description: + - The ARN of a KMS key used to encrypt objects delivered by Config. The key must belong to the same region as the destination S3 bucket. + type: str sns_topic_arn: description: - The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes. 
@@ -49,22 +50,31 @@ options: choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" + +EXAMPLES = r""" +- name: Create a delivery channel for AWS Config + community.aws.config_delivery_channel: + name: test_delivery_channel + state: present + s3_bucket: 'test_aws_config_bucket' + sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no' + delivery_frequency: 'Twelve_Hours' -EXAMPLES = ''' -- name: Create Delivery Channel for AWS Config +- name: Create a delivery channel with encrypted objects community.aws.config_delivery_channel: name: test_delivery_channel state: present s3_bucket: 'test_aws_config_bucket' + kms_key_arn: 'arn:aws:kms:us-east-1:123456789012:key/160f41cb-e660-4fa0-8bf6-976f53bf7851' sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no' delivery_frequency: 'Twelve_Hours' -''' +""" -RETURN = '''#''' +RETURN = r"""#""" try: @@ -74,28 +84,31 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule # this waits for an IAM role to become fully available, at the cost of # taking a long time to fail when the IAM role/policy really is invalid retry_unavailable_iam_on_put_delivery = AWSRetry.jittered_backoff( - catch_extra_error_codes=['InsufficientDeliveryPolicyException'], + catch_extra_error_codes=["InsufficientDeliveryPolicyException"], ) def resource_exists(client, module, params): try: channel = client.describe_delivery_channels( - DeliveryChannelNames=[params['name']], + DeliveryChannelNames=[params["name"]], aws_retry=True, ) - return channel['DeliveryChannels'][0] - except is_boto3_error_code('NoSuchDeliveryChannelException'): + return channel["DeliveryChannels"][0] + except is_boto3_error_code("NoSuchDeliveryChannelException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) @@ -106,49 +119,63 @@ def create_resource(client, module, params, result): )( DeliveryChannel=params, ) - result['changed'] = True - result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + result["changed"] = True + result["channel"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result - except is_boto3_error_code('InvalidS3KeyPrefixException') as e: - module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. 
Try '/' for no prefix") - except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. " - "Make sure the bucket exists and is available") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel") + except is_boto3_error_code("InvalidS3KeyPrefixException") as e: + module.fail_json_aws( + e, + msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix", + ) + except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg="The `s3_prefix` or `s3_bucket` parameter is invalid. Make sure the bucket exists and is available", + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg="Couldn't create AWS Config delivery channel", + ) def update_resource(client, module, params, result): current_params = client.describe_delivery_channels( - DeliveryChannelNames=[params['name']], + DeliveryChannelNames=[params["name"]], aws_retry=True, ) - if params != current_params['DeliveryChannels'][0]: + if params != current_params["DeliveryChannels"][0]: try: retry_unavailable_iam_on_put_delivery( client.put_delivery_channel, )( DeliveryChannel=params, ) - result['changed'] = True - result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + result["changed"] = True + result["channel"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result - except is_boto3_error_code('InvalidS3KeyPrefixException') as e: + except is_boto3_error_code("InvalidS3KeyPrefixException") as e: module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix") - except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. " - "Make sure the bucket exists and is available") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg="The `s3_prefix` or `s3_bucket` parameter is invalid. 
Make sure the bucket exists and is available", + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel") def delete_resource(client, module, params, result): try: - response = client.delete_delivery_channel( - DeliveryChannelName=params['name'] - ) - result['changed'] = True + response = client.delete_delivery_channel(DeliveryChannelName=params["name"]) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config delivery channel") @@ -157,62 +184,61 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 's3_bucket': dict(type='str', required=True), - 's3_prefix': dict(type='str'), - 'sns_topic_arn': dict(type='str'), - 'delivery_frequency': dict( - type='str', + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "s3_bucket": dict(type="str", required=True), + "s3_prefix": dict(type="str"), + "kms_key_arn": dict(type="str", no_log=True), + "sns_topic_arn": dict(type="str"), + "delivery_frequency": dict( + type="str", choices=[ - 'One_Hour', - 'Three_Hours', - 'Six_Hours', - 'Twelve_Hours', - 'TwentyFour_Hours' - ] + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours", + ], ), }, supports_check_mode=False, ) - result = { - 'changed': False - } + result = {"changed": False} - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") params = {} if name: - params['name'] = name - if module.params.get('s3_bucket'): - params['s3BucketName'] = module.params.get('s3_bucket') - if module.params.get('s3_prefix'): - params['s3KeyPrefix'] = module.params.get('s3_prefix') - if module.params.get('sns_topic_arn'): - params['snsTopicARN'] = module.params.get('sns_topic_arn') - if module.params.get('delivery_frequency'): - params['configSnapshotDeliveryProperties'] = { - 'deliveryFrequency': module.params.get('delivery_frequency') - } - - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + params["name"] = name + if module.params.get("s3_bucket"): + params["s3BucketName"] = module.params.get("s3_bucket") + if module.params.get("s3_prefix"): + params["s3KeyPrefix"] = module.params.get("s3_prefix") + if module.params.get("kms_key_arn"): + params["s3KmsKeyArn"] = module.params.get("kms_key_arn") + if module.params.get("sns_topic_arn"): + params["snsTopicARN"] = module.params.get("sns_topic_arn") + if module.params.get("delivery_frequency"): + params["configSnapshotDeliveryProperties"] = {"deliveryFrequency": module.params.get("delivery_frequency")} + + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if state == 'present': + if state == "present": if not resource_status: create_resource(client, module, params, result) if resource_status: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if resource_status: delete_resource(client, module, params, result) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == 
"__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/config_recorder.py b/ansible_collections/community/aws/plugins/modules/config_recorder.py index d90ce46cd..510bbaa23 100644 --- a/ansible_collections/community/aws/plugins/modules/config_recorder.py +++ b/ansible_collections/community/aws/plugins/modules/config_recorder.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: config_recorder version_added: 1.0.0 @@ -62,23 +59,23 @@ options: - Before you can set this option, you must set I(all_supported=false). type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create Configuration Recorder for AWS Config community.aws.config_recorder: name: test_configuration_recorder state: present role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder' recording_group: - all_supported: true - include_global_types: true -''' + all_supported: true + include_global_types: true +""" -RETURN = '''#''' +RETURN = r"""#""" try: @@ -88,47 +85,43 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def resource_exists(client, module, params): try: - recorder = client.describe_configuration_recorders( - ConfigurationRecorderNames=[params['name']] - ) - return recorder['ConfigurationRecorders'][0] - except is_boto3_error_code('NoSuchConfigurationRecorderException'): + recorder = client.describe_configuration_recorders(ConfigurationRecorderNames=[params["name"]]) + return recorder["ConfigurationRecorders"][0] + except is_boto3_error_code("NoSuchConfigurationRecorderException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) def create_resource(client, module, params, result): try: - response = client.put_configuration_recorder( - ConfigurationRecorder=params - ) - result['changed'] = True - result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + response = client.put_configuration_recorder(ConfigurationRecorder=params) + result["changed"] = True + result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder") def update_resource(client, module, params, result): - 
current_params = client.describe_configuration_recorders( - ConfigurationRecorderNames=[params['name']] - ) + current_params = client.describe_configuration_recorders(ConfigurationRecorderNames=[params["name"]]) - if params != current_params['ConfigurationRecorders'][0]: + if params != current_params["ConfigurationRecorders"][0]: try: - response = client.put_configuration_recorder( - ConfigurationRecorder=params - ) - result['changed'] = True - result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + response = client.put_configuration_recorder(ConfigurationRecorder=params) + result["changed"] = True + result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder") @@ -136,77 +129,68 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: - response = client.delete_configuration_recorder( - ConfigurationRecorderName=params['name'] - ) - result['changed'] = True + response = client.delete_configuration_recorder(ConfigurationRecorderName=params["name"]) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder") def main(): - module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'role_arn': dict(type='str'), - 'recording_group': dict(type='dict'), + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "role_arn": dict(type="str"), + "recording_group": dict(type="dict"), }, supports_check_mode=False, required_if=[ - ('state', 'present', ['role_arn', 'recording_group']), + ("state", "present", ["role_arn", "recording_group"]), ], ) - result = { - 'changed': False - } + result = {"changed": False} - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") params = {} if name: - params['name'] = name - if module.params.get('role_arn'): - params['roleARN'] = module.params.get('role_arn') - if module.params.get('recording_group'): - params['recordingGroup'] = {} - if module.params.get('recording_group').get('all_supported') is not None: - params['recordingGroup'].update({ - 'allSupported': module.params.get('recording_group').get('all_supported') - }) - if module.params.get('recording_group').get('include_global_types') is not None: - params['recordingGroup'].update({ - 'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types') - }) - if module.params.get('recording_group').get('resource_types'): - params['recordingGroup'].update({ - 'resourceTypes': module.params.get('recording_group').get('resource_types') - }) + params["name"] = name + if module.params.get("role_arn"): + params["roleARN"] = module.params.get("role_arn") + if module.params.get("recording_group"): + params["recordingGroup"] = {} + if module.params.get("recording_group").get("all_supported") is not None: + params["recordingGroup"].update({"allSupported": module.params.get("recording_group").get("all_supported")}) + if module.params.get("recording_group").get("include_global_types") is not None: + 
params["recordingGroup"].update( + {"includeGlobalResourceTypes": module.params.get("recording_group").get("include_global_types")} + ) + if module.params.get("recording_group").get("resource_types"): + params["recordingGroup"].update( + {"resourceTypes": module.params.get("recording_group").get("resource_types")} + ) else: - params['recordingGroup'].update({ - 'resourceTypes': [] - }) + params["recordingGroup"].update({"resourceTypes": []}) - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if state == 'present': + if state == "present": if not resource_status: create_resource(client, module, params, result) if resource_status: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if resource_status: delete_resource(client, module, params, result) - module.exit_json(changed=result['changed']) + module.exit_json(changed=result["changed"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/config_rule.py b/ansible_collections/community/aws/plugins/modules/config_rule.py index d5cb717fd..b86a528dd 100644 --- a/ansible_collections/community/aws/plugins/modules/config_rule.py +++ b/ansible_collections/community/aws/plugins/modules/config_rule.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: config_rule version_added: 1.0.0 @@ -86,27 +83,26 @@ options: choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create Config Rule for AWS Config community.aws.config_rule: name: test_config_rule state: present description: 'This AWS Config rule checks for public write access on S3 buckets' scope: - compliance_types: - - 'AWS::S3::Bucket' + compliance_types: + - 'AWS::S3::Bucket' source: - owner: AWS - identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED' - -''' + owner: AWS + identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED' +""" -RETURN = '''#''' +RETURN = r"""#""" try: @@ -116,30 +112,32 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def rule_exists(client, module, params): try: rule = client.describe_config_rules( - ConfigRuleNames=[params['ConfigRuleName']], + ConfigRuleNames=[params["ConfigRuleName"]], aws_retry=True, ) - return rule['ConfigRules'][0] - except 
is_boto3_error_code('NoSuchConfigRuleException'): + return rule["ConfigRules"][0] + except is_boto3_error_code("NoSuchConfigRuleException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) def create_resource(client, module, params, result): try: - client.put_config_rule( - ConfigRule=params - ) - result['changed'] = True + client.put_config_rule(ConfigRule=params) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config rule") @@ -147,21 +145,19 @@ def create_resource(client, module, params, result): def update_resource(client, module, params, result): current_params = client.describe_config_rules( - ConfigRuleNames=[params['ConfigRuleName']], + ConfigRuleNames=[params["ConfigRuleName"]], aws_retry=True, ) - del current_params['ConfigRules'][0]['ConfigRuleArn'] - del current_params['ConfigRules'][0]['ConfigRuleId'] - del current_params['ConfigRules'][0]['EvaluationModes'] + del current_params["ConfigRules"][0]["ConfigRuleArn"] + del current_params["ConfigRules"][0]["ConfigRuleId"] + del current_params["ConfigRules"][0]["EvaluationModes"] - if params != current_params['ConfigRules'][0]: + if params != current_params["ConfigRules"][0]: try: - client.put_config_rule( - ConfigRule=params - ) - result['changed'] = True - result['rule'] = camel_dict_to_snake_dict(rule_exists(client, module, params)) + client.put_config_rule(ConfigRule=params) + result["changed"] = True + result["rule"] = camel_dict_to_snake_dict(rule_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config rule") @@ -170,11 +166,11 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: response = client.delete_config_rule( - ConfigRuleName=params['ConfigRuleName'], + ConfigRuleName=params["ConfigRuleName"], aws_retry=True, ) - result['changed'] = True - result['rule'] = {} + result["changed"] = True + result["rule"] = {} return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config rule") @@ -183,93 +179,105 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'description': dict(type='str'), - 'scope': dict(type='dict'), - 'source': dict(type='dict', required=True), - 'input_parameters': dict(type='str'), - 'execution_frequency': dict( - type='str', + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "description": dict(type="str"), + "scope": dict(type="dict"), + "source": dict(type="dict", required=True), + "input_parameters": dict(type="str"), + "execution_frequency": dict( + type="str", choices=[ - 'One_Hour', - 'Three_Hours', - 'Six_Hours', - 'Twelve_Hours', - 'TwentyFour_Hours' - ] + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours", + ], ), }, supports_check_mode=False, ) - result = { - 'changed': False - } + result = 
{"changed": False} - name = module.params.get('name') - resource_type = module.params.get('resource_type') - state = module.params.get('state') + name = module.params.get("name") + resource_type = module.params.get("resource_type") + state = module.params.get("state") params = {} if name: - params['ConfigRuleName'] = name - if module.params.get('description'): - params['Description'] = module.params.get('description') - if module.params.get('scope'): - params['Scope'] = {} - if module.params.get('scope').get('compliance_types'): - params['Scope'].update({ - 'ComplianceResourceTypes': module.params.get('scope').get('compliance_types') - }) - if module.params.get('scope').get('tag_key'): - params['Scope'].update({ - 'TagKey': module.params.get('scope').get('tag_key') - }) - if module.params.get('scope').get('tag_value'): - params['Scope'].update({ - 'TagValue': module.params.get('scope').get('tag_value') - }) - if module.params.get('scope').get('compliance_id'): - params['Scope'].update({ - 'ComplianceResourceId': module.params.get('scope').get('compliance_id') - }) - if module.params.get('source'): - params['Source'] = {} - if module.params.get('source').get('owner'): - params['Source'].update({ - 'Owner': module.params.get('source').get('owner') - }) - if module.params.get('source').get('identifier'): - params['Source'].update({ - 'SourceIdentifier': module.params.get('source').get('identifier') - }) - if module.params.get('source').get('details'): - params['Source'].update({ - 'SourceDetails': module.params.get('source').get('details') - }) - if module.params.get('input_parameters'): - params['InputParameters'] = module.params.get('input_parameters') - if module.params.get('execution_frequency'): - params['MaximumExecutionFrequency'] = module.params.get('execution_frequency') - params['ConfigRuleState'] = 'ACTIVE' + params["ConfigRuleName"] = name + if module.params.get("description"): + params["Description"] = module.params.get("description") + if module.params.get("scope"): + params["Scope"] = {} + if module.params.get("scope").get("compliance_types"): + params["Scope"].update( + { + "ComplianceResourceTypes": module.params.get("scope").get("compliance_types"), + } + ) + if module.params.get("scope").get("tag_key"): + params["Scope"].update( + { + "TagKey": module.params.get("scope").get("tag_key"), + } + ) + if module.params.get("scope").get("tag_value"): + params["Scope"].update( + { + "TagValue": module.params.get("scope").get("tag_value"), + } + ) + if module.params.get("scope").get("compliance_id"): + params["Scope"].update( + { + "ComplianceResourceId": module.params.get("scope").get("compliance_id"), + } + ) + if module.params.get("source"): + params["Source"] = {} + if module.params.get("source").get("owner"): + params["Source"].update( + { + "Owner": module.params.get("source").get("owner"), + } + ) + if module.params.get("source").get("identifier"): + params["Source"].update( + { + "SourceIdentifier": module.params.get("source").get("identifier"), + } + ) + if module.params.get("source").get("details"): + params["Source"].update( + { + "SourceDetails": module.params.get("source").get("details"), + } + ) + if module.params.get("input_parameters"): + params["InputParameters"] = module.params.get("input_parameters") + if module.params.get("execution_frequency"): + params["MaximumExecutionFrequency"] = module.params.get("execution_frequency") + params["ConfigRuleState"] = "ACTIVE" - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + client = 
module.client("config", retry_decorator=AWSRetry.jittered_backoff()) existing_rule = rule_exists(client, module, params) - if state == 'present': + if state == "present": if not existing_rule: create_resource(client, module, params, result) else: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if existing_rule: delete_resource(client, module, params, result) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/data_pipeline.py b/ansible_collections/community/aws/plugins/modules/data_pipeline.py index fc441c10c..85849324f 100644 --- a/ansible_collections/community/aws/plugins/modules/data_pipeline.py +++ b/ansible_collections/community/aws/plugins/modules/data_pipeline.py @@ -1,13 +1,10 @@ #!/usr/bin/python -# +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: data_pipeline version_added: 1.0.0 @@ -15,10 +12,6 @@ author: - Raghu Udiyar (@raags) <raghusiddarth@gmail.com> - Sloane Hertel (@s-hertel) <shertel@redhat.com> short_description: Create and manage AWS Datapipelines -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 description: - Create and manage AWS Datapipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects) given to the datapipeline. @@ -126,9 +119,13 @@ options: type: dict default: {} aliases: ['resource_tags'] -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create pipeline @@ -147,23 +144,30 @@ EXAMPLES = r''' - community.aws.data_pipeline: name: test-dp objects: - - "id": "DefaultSchedule" - "name": "Every 1 day" - "fields": + - id: "DefaultSchedule" + name: "Every 1 day" + fields: - "key": "period" "stringValue": "1 days" - "key": "type" "stringValue": "Schedule" - "key": "startAt" "stringValue": "FIRST_ACTIVATION_DATE_TIME" - - "id": "Default" - "name": "Default" - "fields": [ { "key": "resourceRole", "stringValue": "my_resource_role" }, - { "key": "role", "stringValue": "DataPipelineDefaultRole" }, - { "key": "pipelineLogUri", "stringValue": "s3://my_s3_log.txt" }, - { "key": "scheduleType", "stringValue": "cron" }, - { "key": "schedule", "refValue": "DefaultSchedule" }, - { "key": "failureAndRerunMode", "stringValue": "CASCADE" } ] + - id: "Default" + name: "Default" + fields: + - "key": "resourceRole" + "stringValue": "my_resource_role" + - "key": "role" + "stringValue": "DataPipelineDefaultRole" + - "key": "pipelineLogUri" + "stringValue": "s3://my_s3_log.txt" + - "key": "scheduleType" + "stringValue": "cron" + - "key": "schedule" + "refValue": "DefaultSchedule" + - "key": "failureAndRerunMode" + "stringValue": "CASCADE" state: active # Activate pipeline @@ -177,10 +181,9 @@ EXAMPLES = r''' name: test-dp region: us-west-2 state: absent +""" -''' - -RETURN = r''' +RETURN = r""" changed: description: whether the data pipeline has been modified type: bool @@ -195,7 +198,7 @@ result: data_pipeline will be an empty dict. The msg describes the status of the operation. 
returned: always type: dict -''' +""" import hashlib import json @@ -209,15 +212,15 @@ except ImportError: from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED'] -DP_INACTIVE_STATES = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING'] -DP_ACTIVATING_STATE = 'ACTIVATING' -DP_DEACTIVATING_STATE = 'DEACTIVATING' -PIPELINE_DOESNT_EXIST = '^.*Pipeline with id: {0} does not exist$' +DP_ACTIVE_STATES = ["ACTIVE", "SCHEDULED"] +DP_INACTIVE_STATES = ["INACTIVE", "PENDING", "FINISHED", "DELETING"] +DP_ACTIVATING_STATE = "ACTIVATING" +DP_DEACTIVATING_STATE = "DEACTIVATING" +PIPELINE_DOESNT_EXIST = "^.*Pipeline with id: {0} does not exist$" class DataPipelineNotFound(Exception): @@ -238,9 +241,9 @@ def pipeline_id(client, name): """ pipelines = client.list_pipelines() - for dp in pipelines['pipelineIdList']: - if dp['name'] == name: - return dp['id'] + for dp in pipelines["pipelineIdList"]: + if dp["name"] == name: + return dp["id"] raise DataPipelineNotFound @@ -254,7 +257,7 @@ def pipeline_description(client, dp_id): """ try: return client.describe_pipelines(pipelineIds=[dp_id]) - except is_boto3_error_code(['PipelineNotFoundException', 'PipelineDeletedException']): + except is_boto3_error_code(["PipelineNotFoundException", "PipelineDeletedException"]): raise DataPipelineNotFound @@ -270,10 +273,10 @@ def pipeline_field(client, dp_id, field): """ dp_description = pipeline_description(client, dp_id) - for field_key in dp_description['pipelineDescriptionList'][0]['fields']: - if field_key['key'] == field: - return field_key['stringValue'] - raise KeyError("Field key {0} not found!".format(field)) + for field_key in dp_description["pipelineDescriptionList"][0]["fields"]: + if field_key["key"] == field: + return field_key["stringValue"] + raise KeyError(f"Field key {field} not found!") def run_with_timeout(timeout, func, *func_args, **func_kwargs): @@ -345,70 +348,70 @@ def pipeline_exists_timeout(client, dp_id, timeout): def activate_pipeline(client, module): - """Activates pipeline - - """ - dp_name = module.params.get('name') - timeout = module.params.get('timeout') + """Activates pipeline""" + dp_name = module.params.get("name") + timeout = module.params.get("timeout") try: dp_id = pipeline_id(client, dp_name) except DataPipelineNotFound: - module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name)) + module.fail_json(msg=f"Data Pipeline {dp_name} not found") if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES: changed = False else: try: client.activate_pipeline(pipelineId=dp_id) - except is_boto3_error_code('InvalidRequestException'): + except is_boto3_error_code("InvalidRequestException"): module.fail_json(msg="You need to populate your pipeline before activation.") try: - pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES, - timeout=timeout) + pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES, timeout=timeout) except TimeOutException: if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED": # activated but completed more 
rapidly than it was checked
                pass
            else:
-                module.fail_json(msg=('Data Pipeline {0} failed to activate '
-                                      'within timeout {1} seconds').format(dp_name, timeout))
+                module.fail_json(
+                    msg=f"Data Pipeline {dp_name} failed to activate within timeout {timeout} seconds",
+                )
        changed = True

    data_pipeline = get_result(client, dp_id)
-    result = {'data_pipeline': data_pipeline,
-              'msg': 'Data Pipeline {0} activated.'.format(dp_name)}
+    result = {
+        "data_pipeline": data_pipeline,
+        "msg": f"Data Pipeline {dp_name} activated.",
+    }

    return (changed, result)


def deactivate_pipeline(client, module):
-    """Deactivates pipeline
-
-    """
-    dp_name = module.params.get('name')
-    timeout = module.params.get('timeout')
+    """Deactivates pipeline"""
+    dp_name = module.params.get("name")
+    timeout = module.params.get("timeout")

    try:
        dp_id = pipeline_id(client, dp_name)
    except DataPipelineNotFound:
-        module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
+        module.fail_json(msg=f"Data Pipeline {dp_name} not found")

    if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES:
        changed = False
    else:
        client.deactivate_pipeline(pipelineId=dp_id)
        try:
-            pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES,
-                                    timeout=timeout)
+            pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES, timeout=timeout)
        except TimeOutException:
-            module.fail_json(msg=('Data Pipeline {0} failed to deactivate'
-                                  'within timeout {1} seconds').format(dp_name, timeout))
+            module.fail_json(
+                msg=f"Data Pipeline {dp_name} failed to deactivate within timeout {timeout} seconds",
+            )
        changed = True

    data_pipeline = get_result(client, dp_id)
-    result = {'data_pipeline': data_pipeline,
-              'msg': 'Data Pipeline {0} deactivated.'.format(dp_name)}
+    result = {
+        "data_pipeline": data_pipeline,
+        "msg": f"Data Pipeline {dp_name} deactivated.",
+    }

    return (changed, result)

@@ -422,11 +425,9 @@ def _delete_dp_with_check(dp_id, client, timeout):

def delete_pipeline(client, module):
-    """Deletes pipeline
-
-    """
-    dp_name = module.params.get('name')
-    timeout = module.params.get('timeout')
+    """Deletes pipeline"""
+    dp_name = module.params.get("name")
+    timeout = module.params.get("timeout")

    try:
        dp_id = pipeline_id(client, dp_name)
@@ -435,10 +436,13 @@ def delete_pipeline(client, module):
    except DataPipelineNotFound:
        changed = False
    except TimeOutException:
-        module.fail_json(msg=('Data Pipeline {0} failed to delete'
-                              'within timeout {1} seconds').format(dp_name, timeout))
-    result = {'data_pipeline': {},
-              'msg': 'Data Pipeline {0} deleted'.format(dp_name)}
+        module.fail_json(
+            msg=f"Data Pipeline {dp_name} failed to delete within timeout {timeout} seconds",
+        )
+    result = {
+        "data_pipeline": {},
+        "msg": f"Data Pipeline {dp_name} deleted",
+    }

    return (changed, result)

@@ -446,14 +450,14 @@ def delete_pipeline(client, module):
def build_unique_id(module):
    data = dict(module.params)
    # removing objects from the unique id so we can update objects or populate the pipeline after creation without needing to make a new pipeline
-    [data.pop(each, None) for each in ('objects', 'timeout')]
+    [data.pop(each, None) for each in ("objects", "timeout")]
    json_data = json.dumps(data, sort_keys=True).encode("utf-8")
    hashed_data = hashlib.md5(json_data).hexdigest()
    return hashed_data
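# Worked example of build_unique_id() above (a sketch with hypothetical
# parameter values): the same module arguments, minus `objects` and
# `timeout`, always hash to the same uniqueId, which is what makes pipeline
# creation idempotent even though the AWS API itself is not:
import hashlib
import json

params = {"name": "test-dp", "description": "", "tags": {"env": "dev"}}
json_data = json.dumps(params, sort_keys=True).encode("utf-8")
unique_id = hashlib.md5(json_data).hexdigest()  # stable across replays


def format_tags(tags):
-    """ Reformats tags
+    """Reformats tags

    :param dict tags: dict of data pipeline tags (e.g. {key1: val1, key2: val2, key3: val3})
    :returns: list of dicts (e.g. 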
[{key: key1, value: val1}, {key: key2, value: val2}, {key: key3, value: val3}]) @@ -463,16 +467,16 @@ def format_tags(tags): def get_result(client, dp_id): - """ Get the current state of the data pipeline and reformat it to snake_case for exit_json + """Get the current state of the data pipeline and reformat it to snake_case for exit_json :param object client: boto3 datapipeline client :param string dp_id: pipeline id :returns: reformatted dict of pipeline description - """ + """ # pipeline_description returns a pipelineDescriptionList of length 1 # dp is a dict with keys "description" (str), "fields" (list), "name" (str), "pipelineId" (str), "tags" (dict) - dp = pipeline_description(client, dp_id)['pipelineDescriptionList'][0] + dp = pipeline_description(client, dp_id)["pipelineDescriptionList"][0] # Get uniqueId and pipelineState in fields to add to the exit_json result dp["unique_id"] = pipeline_field(client, dp_id, field="uniqueId") @@ -489,8 +493,7 @@ def get_result(client, dp_id): def diff_pipeline(client, module, objects, unique_id, dp_name): - """Check if there's another pipeline with the same unique_id and if so, checks if the object needs to be updated - """ + """Check if there's another pipeline with the same unique_id and if so, checks if the object needs to be updated""" result = {} changed = False create_dp = False @@ -506,16 +509,18 @@ def diff_pipeline(client, module, objects, unique_id, dp_name): create_dp = True # Unique ids are the same - check if pipeline needs modification else: - dp_objects = client.get_pipeline_definition(pipelineId=dp_id)['pipelineObjects'] + dp_objects = client.get_pipeline_definition(pipelineId=dp_id)["pipelineObjects"] # Definition needs to be updated if dp_objects != objects: changed, msg = define_pipeline(client, module, objects, dp_id) # No changes else: - msg = 'Data Pipeline {0} is present'.format(dp_name) + msg = f"Data Pipeline {dp_name} is present" data_pipeline = get_result(client, dp_id) - result = {'data_pipeline': data_pipeline, - 'msg': msg} + result = { + "data_pipeline": data_pipeline, + "msg": msg, + } except DataPipelineNotFound: create_dp = True @@ -523,30 +528,32 @@ def diff_pipeline(client, module, objects, unique_id, dp_name): def define_pipeline(client, module, objects, dp_id): - """Puts pipeline definition - - """ - dp_name = module.params.get('name') + """Puts pipeline definition""" + dp_name = module.params.get("name") if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED": - msg = 'Data Pipeline {0} is unable to be updated while in state FINISHED.'.format(dp_name) + msg = f"Data Pipeline {dp_name} is unable to be updated while in state FINISHED." changed = False elif objects: - parameters = module.params.get('parameters') - values = module.params.get('values') + parameters = module.params.get("parameters") + values = module.params.get("values") try: - client.put_pipeline_definition(pipelineId=dp_id, - pipelineObjects=objects, - parameterObjects=parameters, - parameterValues=values) - msg = 'Data Pipeline {0} has been updated.'.format(dp_name) + client.put_pipeline_definition( + pipelineId=dp_id, pipelineObjects=objects, parameterObjects=parameters, parameterValues=values + ) + msg = f"Data Pipeline {dp_name} has been updated." changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to put the definition for pipeline {0}. 
Check that string/reference fields"
-                                      "are not empty and that the number of objects in the pipeline does not exceed maximum allowed"
-                                      "objects".format(dp_name))
+            module.fail_json_aws(
+                e,
+                msg=(
+                    f"Failed to put the definition for pipeline {dp_name}. Check that string/reference fields"
+                    " are not empty and that the number of objects in the pipeline does not exceed maximum allowed"
+                    " objects"
+                ),
+            )
    else:
        changed = False
        msg = ""

@@ -555,14 +562,12 @@


def create_pipeline(client, module):
-    """Creates datapipeline. Uses uniqueId to achieve idempotency.
-
-    """
-    dp_name = module.params.get('name')
-    objects = module.params.get('objects', None)
-    description = module.params.get('description', '')
-    tags = module.params.get('tags')
-    timeout = module.params.get('timeout')
+    """Creates datapipeline. Uses uniqueId to achieve idempotency."""
+    dp_name = module.params.get("name")
+    objects = module.params.get("objects", None)
+    description = module.params.get("description", "")
+    tags = module.params.get("tags")
+    timeout = module.params.get("timeout")

    unique_id = build_unique_id(module)
    create_dp, changed, result = diff_pipeline(client, module, objects, unique_id, dp_name)
@@ -576,24 +581,27 @@
    # Make pipeline
    try:
        tags = format_tags(tags)
-        dp = client.create_pipeline(name=dp_name,
-                                    uniqueId=unique_id,
-                                    description=description,
-                                    tags=tags)
-        dp_id = dp['pipelineId']
+        dp = client.create_pipeline(name=dp_name, uniqueId=unique_id, description=description, tags=tags)
+        dp_id = dp["pipelineId"]
        pipeline_exists_timeout(client, dp_id, timeout)
    except TimeOutException:
-        module.fail_json(msg=('Data Pipeline {0} failed to create'
-                              'within timeout {1} seconds').format(dp_name, timeout))
+        module.fail_json(
+            msg=f"Data Pipeline {dp_name} failed to create within timeout {timeout} seconds",
+        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Failed to create the data pipeline {0}.".format(dp_name))
+        module.fail_json_aws(
+            e,
+            msg=f"Failed to create the data pipeline {dp_name}.",
+        )
    # Put pipeline definition
    changed, msg = define_pipeline(client, module, objects, dp_id)

    changed = True
    data_pipeline = get_result(client, dp_id)
-    result = {'data_pipeline': data_pipeline,
-              'msg': 'Data Pipeline {0} created.'.format(dp_name) + msg}
+    result = {
+        "data_pipeline": data_pipeline,
+        "msg": f"Data Pipeline {dp_name} created." 
+ msg, + } return (changed, result) @@ -601,34 +609,33 @@ def create_pipeline(client, module): def main(): argument_spec = dict( name=dict(required=True), - description=dict(required=False, default=''), - objects=dict(required=False, type='list', default=[], elements='dict'), - parameters=dict(required=False, type='list', default=[], elements='dict'), - timeout=dict(required=False, type='int', default=300), - state=dict(default='present', choices=['present', 'absent', - 'active', 'inactive']), - tags=dict(required=False, type='dict', default={}, aliases=['resource_tags']), - values=dict(required=False, type='list', default=[], elements='dict'), + description=dict(required=False, default=""), + objects=dict(required=False, type="list", default=[], elements="dict"), + parameters=dict(required=False, type="list", default=[], elements="dict"), + timeout=dict(required=False, type="int", default=300), + state=dict(default="present", choices=["present", "absent", "active", "inactive"]), + tags=dict(required=False, type="dict", default={}, aliases=["resource_tags"]), + values=dict(required=False, type="list", default=[], elements="dict"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) try: - client = module.client('datapipeline') + client = module.client("datapipeline") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - state = module.params.get('state') - if state == 'present': + state = module.params.get("state") + if state == "present": changed, result = create_pipeline(client, module) - elif state == 'absent': + elif state == "absent": changed, result = delete_pipeline(client, module) - elif state == 'active': + elif state == "active": changed, result = activate_pipeline(client, module) - elif state == 'inactive': + elif state == "inactive": changed, result = deactivate_pipeline(client, module) module.exit_json(result=result, changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py b/ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py index 45180ac6c..1e99fd5ea 100644 --- a/ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py +++ b/ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py @@ -1,15 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: directconnect_confirm_connection short_description: Confirms the creation of a hosted DirectConnect connection @@ -21,10 +16,6 @@ description: The usage did not change. author: - "Matt Traynham (@mtraynham)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: name: description: @@ -36,9 +27,13 @@ options: - The ID of the Direct Connect connection. - One of I(connection_id) or I(name) must be specified. 
type: str -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # confirm a Direct Connect by name - name: confirm the connection id @@ -49,29 +44,31 @@ EXAMPLES = ''' - name: confirm the connection id community.aws.directconnect_confirm_connection: connection_id: dxcon-xxxxxxxx -''' +""" -RETURN = ''' +RETURN = r""" connection_state: description: The state of the connection. returned: always type: str sample: pending -''' +""" import traceback try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # handled by imported AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} @@ -84,26 +81,28 @@ def describe_connections(client, params): def find_connection_id(client, connection_id=None, connection_name=None): params = {} if connection_id: - params['connectionId'] = connection_id + params["connectionId"] = connection_id try: response = describe_connections(client, params) except (BotoCoreError, ClientError) as e: if connection_id: - msg = "Failed to describe DirectConnect ID {0}".format(connection_id) + msg = f"Failed to describe DirectConnect ID {connection_id}" else: msg = "Failed to describe DirectConnect connections" - raise DirectConnectError(msg=msg, - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=msg, + last_traceback=traceback.format_exc(), + exception=e, + ) match = [] - if len(response.get('connections', [])) == 1 and connection_id: - if response['connections'][0]['connectionState'] != 'deleted': - match.append(response['connections'][0]['connectionId']) + if len(response.get("connections", [])) == 1 and connection_id: + if response["connections"][0]["connectionState"] != "deleted": + match.append(response["connections"][0]["connectionId"]) - for conn in response.get('connections', []): - if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted': - match.append(conn['connectionId']) + for conn in response.get("connections", []): + if connection_name == conn["connectionName"] and conn["connectionState"] != "deleted": + match.append(conn["connectionId"]) if len(match) == 1: return match[0] @@ -114,34 +113,33 @@ def find_connection_id(client, connection_id=None, connection_name=None): def get_connection_state(client, connection_id): try: response = describe_connections(client, dict(connectionId=connection_id)) - return response['connections'][0]['connectionState'] + return response["connections"][0]["connectionState"] except (BotoCoreError, ClientError, IndexError) as e: - raise DirectConnectError(msg="Failed to describe DirectConnect connection {0} state".format(connection_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + 
msg=f"Failed to describe DirectConnect connection {connection_id} state", + last_traceback=traceback.format_exc(), + exception=e, + ) def main(): - argument_spec = dict( - connection_id=dict(), - name=dict() + argument_spec = dict(connection_id=dict(), name=dict()) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[["connection_id", "name"]], + required_one_of=[["connection_id", "name"]], ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['connection_id', 'name']], - required_one_of=[['connection_id', 'name']]) - client = module.client('directconnect') + client = module.client("directconnect") - connection_id = module.params['connection_id'] - connection_name = module.params['name'] + connection_id = module.params["connection_id"] + connection_name = module.params["name"] changed = False connection_state = None try: - connection_id = find_connection_id(client, - connection_id, - connection_name) + connection_id = find_connection_id(client, connection_id, connection_name) connection_state = get_connection_state(client, connection_id) - if connection_state == 'ordering': + if connection_state == "ordering": client.confirm_connection(connectionId=connection_id) changed = True connection_state = get_connection_state(client, connection_id) @@ -154,5 +152,5 @@ def main(): module.exit_json(changed=changed, connection_state=connection_state) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_connection.py b/ansible_collections/community/aws/plugins/modules/directconnect_connection.py index 28d86717d..40e9bc913 100644 --- a/ansible_collections/community/aws/plugins/modules/directconnect_connection.py +++ b/ansible_collections/community/aws/plugins/modules/directconnect_connection.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: directconnect_connection version_added: 1.0.0 @@ -19,10 +17,6 @@ description: The usage did not change. author: - "Sloane Hertel (@s-hertel)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: state: description: @@ -68,9 +62,13 @@ options: - By default this will not happen. This option must be explicitly set to C(true) to change I(bandwith) or I(location). type: bool default: false -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = """ +EXAMPLES = r""" # create a Direct Connect connection - community.aws.directconnect_connection: @@ -102,7 +100,7 @@ EXAMPLES = """ name: ansible-test-connection """ -RETURN = """ +RETURN = r""" connection: description: The attributes of the direct connect connection. 
type: complex @@ -158,18 +156,20 @@ connection: import traceback try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # handled by imported AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import associate_connection_and_lag from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} @@ -181,31 +181,29 @@ def connection_status(client, connection_id): def connection_exists(client, connection_id=None, connection_name=None, verify=True): params = {} if connection_id: - params['connectionId'] = connection_id + params["connectionId"] = connection_id try: response = AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params) except (BotoCoreError, ClientError) as e: if connection_id: - msg = "Failed to describe DirectConnect ID {0}".format(connection_id) + msg = f"Failed to describe DirectConnect ID {connection_id}" else: msg = "Failed to describe DirectConnect connections" - raise DirectConnectError(msg=msg, - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) match = [] connection = [] # look for matching connections - if len(response.get('connections', [])) == 1 and connection_id: - if response['connections'][0]['connectionState'] != 'deleted': - match.append(response['connections'][0]['connectionId']) - connection.extend(response['connections']) + if len(response.get("connections", [])) == 1 and connection_id: + if response["connections"][0]["connectionState"] != "deleted": + match.append(response["connections"][0]["connectionId"]) + connection.extend(response["connections"]) - for conn in response.get('connections', []): - if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted': - match.append(conn['connectionId']) + for conn in response.get("connections", []): + if connection_name == conn["connectionName"] and conn["connectionState"] != "deleted": + match.append(conn["connectionId"]) connection.append(conn) # verifying if the connections exists; if true, return connection identifier, otherwise return False @@ -215,33 +213,35 @@ def connection_exists(client, connection_id=None, connection_name=None, verify=T return False # not verifying if the connection exists; just return current connection info elif len(connection) == 1: - return {'connection': connection[0]} - return {'connection': {}} + return {"connection": connection[0]} + return {"connection": {}} def create_connection(client, location, bandwidth, name, lag_id): if not name: raise DirectConnectError(msg="Failed to 
create a Direct Connect connection: name required.") params = { - 'location': location, - 'bandwidth': bandwidth, - 'connectionName': name, + "location": location, + "bandwidth": bandwidth, + "connectionName": name, } if lag_id: - params['lagId'] = lag_id + params["lagId"] = lag_id try: connection = AWSRetry.jittered_backoff(**retry_params)(client.create_connection)(**params) except (BotoCoreError, ClientError) as e: - raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name), - last_traceback=traceback.format_exc(), - exception=e) - return connection['connectionId'] + raise DirectConnectError( + msg=f"Failed to create DirectConnect connection {name}", + last_traceback=traceback.format_exc(), + exception=e, + ) + return connection["connectionId"] def changed_properties(current_status, location, bandwidth): - current_bandwidth = current_status['bandwidth'] - current_location = current_status['location'] + current_bandwidth = current_status["bandwidth"] + current_location = current_status["location"] return current_bandwidth != bandwidth or current_location != location @@ -249,10 +249,10 @@ def changed_properties(current_status, location, bandwidth): @AWSRetry.jittered_backoff(**retry_params) def update_associations(client, latest_state, connection_id, lag_id): changed = False - if 'lagId' in latest_state and lag_id != latest_state['lagId']: - disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId']) + if "lagId" in latest_state and lag_id != latest_state["lagId"]: + disassociate_connection_and_lag(client, connection_id, lag_id=latest_state["lagId"]) changed = True - if (changed and lag_id) or (lag_id and 'lagId' not in latest_state): + if (changed and lag_id) or (lag_id and "lagId" not in latest_state): associate_connection_and_lag(client, connection_id, lag_id) changed = True return changed @@ -261,16 +261,18 @@ def update_associations(client, latest_state, connection_id, lag_id): def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update): # the connection is found; get the latest state and see if it needs to be updated if connection_id: - latest_state = connection_status(client, connection_id=connection_id)['connection'] + latest_state = connection_status(client, connection_id=connection_id)["connection"] if changed_properties(latest_state, location, bandwidth) and forced_update: ensure_absent(client, connection_id) - return ensure_present(client=client, - connection_id=None, - connection_name=connection_name, - location=location, - bandwidth=bandwidth, - lag_id=lag_id, - forced_update=forced_update) + return ensure_present( + client=client, + connection_id=None, + connection_name=connection_name, + location=location, + bandwidth=bandwidth, + lag_id=lag_id, + forced_update=forced_update, + ) elif update_associations(client, latest_state, connection_id, lag_id): return True, connection_id @@ -293,53 +295,59 @@ def ensure_absent(client, connection_id): def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(), location=dict(), - bandwidth=dict(choices=['1Gbps', '10Gbps']), + bandwidth=dict(choices=["1Gbps", "10Gbps"]), link_aggregation_group=dict(), connection_id=dict(), - forced_update=dict(type='bool', default=False) + forced_update=dict(type="bool", default=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_one_of=[('connection_id', 'name')], - 
required_if=[('state', 'present', ('location', 'bandwidth'))] + required_one_of=[("connection_id", "name")], + required_if=[("state", "present", ("location", "bandwidth"))], ) - connection = module.client('directconnect') + connection = module.client("directconnect") - state = module.params.get('state') + state = module.params.get("state") try: connection_id = connection_exists( - connection, - connection_id=module.params.get('connection_id'), - connection_name=module.params.get('name') + connection, connection_id=module.params.get("connection_id"), connection_name=module.params.get("name") ) - if not connection_id and module.params.get('connection_id'): - module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id'))) - - if state == 'present': - changed, connection_id = ensure_present(connection, - connection_id=connection_id, - connection_name=module.params.get('name'), - location=module.params.get('location'), - bandwidth=module.params.get('bandwidth'), - lag_id=module.params.get('link_aggregation_group'), - forced_update=module.params.get('forced_update')) + if not connection_id and module.params.get("connection_id"): + module.fail_json( + msg=f"The Direct Connect connection {module.params['connection_id']} does not exist.", + ) + + if state == "present": + changed, connection_id = ensure_present( + connection, + connection_id=connection_id, + connection_name=module.params.get("name"), + location=module.params.get("location"), + bandwidth=module.params.get("bandwidth"), + lag_id=module.params.get("link_aggregation_group"), + forced_update=module.params.get("forced_update"), + ) response = connection_status(connection, connection_id) - elif state == 'absent': + elif state == "absent": changed = ensure_absent(connection, connection_id) response = {} except DirectConnectError as e: if e.last_traceback: - module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response)) + module.fail_json( + msg=e.msg, + exception=e.last_traceback, + **camel_dict_to_snake_dict(e.exception.response), + ) else: module.fail_json(msg=e.msg) module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_gateway.py b/ansible_collections/community/aws/plugins/modules/directconnect_gateway.py index 1433b387b..b231f0e8f 100644 --- a/ansible_collections/community/aws/plugins/modules/directconnect_gateway.py +++ b/ansible_collections/community/aws/plugins/modules/directconnect_gateway.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: directconnect_gateway author: - Gobin Sougrakpam (@gobins) @@ -19,10 +17,6 @@ description: - Detaches Virtual Gateways to Direct Connect Gateway. - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_gateway). The usage did not change. -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: state: description: @@ -54,9 +48,13 @@ options: - How long to wait for the association to be deleted. 
    type: int
    default: 320
-'''
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""

-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a new direct connect gateway attached to virtual private gateway
  community.aws.directconnect_gateway:
    state: present
    name: my-dx-gateway
    amazon_asn: 7224
    virtual_gateway_id: vpg-12345
  register: created_dxgw

- name: Create a new unattached dxgw
  community.aws.directconnect_gateway:
    state: present
    name: my-dx-gateway
    amazon_asn: 7224
  register: created_dxgw
-'''
+"""

-RETURN = '''
+RETURN = r"""
result:
  description:
    - The attributes of the Direct Connect Gateway
@@ -95,7 +93,7 @@ result:
    owner_account:
      description: The AWS account ID of the owner of the direct connect gateway.
      type: str
-'''
+"""

import time

@@ -106,17 +104,18 @@ except ImportError:

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule


def dx_gateway_info(client, gateway_id, module):
    try:
        resp = client.describe_direct_connect_gateways(
-            directConnectGatewayId=gateway_id)
+            directConnectGatewayId=gateway_id,
+        )
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Failed to fetch gateway information.")
-    if resp['directConnectGateways']:
-        return resp['directConnectGateways'][0]
+    if resp["directConnectGateways"]:
+        return resp["directConnectGateways"][0]


def wait_for_status(client, module, gateway_id, virtual_gateway_id, status):
@@ -130,9 +129,10 @@
                client,
                module,
                gateway_id=gateway_id,
-                virtual_gateway_id=virtual_gateway_id)
-            if response['directConnectGatewayAssociations']:
-                if response['directConnectGatewayAssociations'][0]['associationState'] == status:
+                virtual_gateway_id=virtual_gateway_id,
+            )
+            if response["directConnectGatewayAssociations"]:
+                if response["directConnectGatewayAssociations"][0]["associationState"] == status:
                    status_achieved = True
                    break
                else:
@@ -149,17 +149,18 @@ def wait_for_status(client, module, gateway_id, virtual_gateway_id, status):
def associate_direct_connect_gateway(client, module, gateway_id):
    params = dict()
-    params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
+    params["virtual_gateway_id"] = module.params.get("virtual_gateway_id")
    try:
        response = client.create_direct_connect_gateway_association(
            directConnectGatewayId=gateway_id,
-            virtualGatewayId=params['virtual_gateway_id'])
+            virtualGatewayId=params["virtual_gateway_id"],
+        )
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
-        module.fail_json_aws(e, 'Failed to associate gateway')
+        module.fail_json_aws(e, "Failed to associate gateway")

-    status_achieved, dxgw = wait_for_status(client, module, gateway_id, params['virtual_gateway_id'], 'associating')
+    status_achieved, dxgw = wait_for_status(client, module, gateway_id, params["virtual_gateway_id"], "associating")
    if not status_achieved:
-        module.fail_json(msg='Error waiting for dxgw to attach to vpg - please check the AWS console')
+        module.fail_json(msg="Error waiting for dxgw to attach to vpg - please check the AWS console")

    result = response
    return result
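# The wait_for_status() helper above is a plain poll-with-timeout loop; the
# same pattern in isolation (a generic sketch, not module code; the polling
# interval is illustrative):
import time


def wait_until(predicate, timeout=320, interval=15):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False  # caller then fails with a "check the AWS console" message

@@ -169,13 +170,14 @@ def delete_association(client, module, gateway_id, virtual_gateway_id):
    try:
        response = client.delete_direct_connect_gateway_association(
            directConnectGatewayId=gateway_id,
-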
virtualGatewayId=virtual_gateway_id, + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to delete gateway association.") - status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, 'disassociating') + status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, "disassociating") if not status_achieved: - module.fail_json(msg='Error waiting for dxgw to detach from vpg - please check the AWS console') + module.fail_json(msg="Error waiting for dxgw to detach from vpg - please check the AWS console") result = response return result @@ -183,12 +185,13 @@ def delete_association(client, module, gateway_id, virtual_gateway_id): def create_dx_gateway(client, module): params = dict() - params['name'] = module.params.get('name') - params['amazon_asn'] = module.params.get('amazon_asn') + params["name"] = module.params.get("name") + params["amazon_asn"] = module.params.get("amazon_asn") try: response = client.create_direct_connect_gateway( - directConnectGatewayName=params['name'], - amazonSideAsn=int(params['amazon_asn'])) + directConnectGatewayName=params["name"], + amazonSideAsn=int(params["amazon_asn"]), + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to create direct connect gateway.") @@ -200,21 +203,21 @@ def find_dx_gateway(client, module, gateway_id=None): params = dict() gateways = list() if gateway_id is not None: - params['directConnectGatewayId'] = gateway_id + params["directConnectGatewayId"] = gateway_id while True: try: resp = client.describe_direct_connect_gateways(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to describe gateways") - gateways.extend(resp['directConnectGateways']) - if 'nextToken' in resp: - params['nextToken'] = resp['nextToken'] + gateways.extend(resp["directConnectGateways"]) + if "nextToken" in resp: + params["nextToken"] = resp["nextToken"] else: break if gateways != []: count = 0 for gateway in gateways: - if module.params.get('name') == gateway['directConnectGatewayName']: + if module.params.get("name") == gateway["directConnectGatewayName"]: count += 1 return gateway return None @@ -224,7 +227,7 @@ def check_dxgw_association(client, module, gateway_id, virtual_gateway_id=None): try: if virtual_gateway_id is None: resp = client.describe_direct_connect_gateway_associations( - directConnectGatewayId=gateway_id + directConnectGatewayId=gateway_id, ) else: resp = client.describe_direct_connect_gateway_associations( @@ -243,22 +246,20 @@ def ensure_present(client, module): changed = False params = dict() result = dict() - params['name'] = module.params.get('name') - params['amazon_asn'] = module.params.get('amazon_asn') - params['virtual_gateway_id'] = module.params.get('virtual_gateway_id') + params["name"] = module.params.get("name") + params["amazon_asn"] = module.params.get("amazon_asn") + params["virtual_gateway_id"] = module.params.get("virtual_gateway_id") # check if a gateway matching our module args already exists existing_dxgw = find_dx_gateway(client, module) - if existing_dxgw is not None and existing_dxgw['directConnectGatewayState'] != 'deleted': - gateway_id = existing_dxgw['directConnectGatewayId'] + if existing_dxgw is not None and existing_dxgw["directConnectGatewayState"] != "deleted": + gateway_id = existing_dxgw["directConnectGatewayId"] # if a gateway_id was 
provided, check if it is attached to the DXGW - if params['virtual_gateway_id']: + if params["virtual_gateway_id"]: resp = check_dxgw_association( - client, - module, - gateway_id=gateway_id, - virtual_gateway_id=params['virtual_gateway_id']) + client, module, gateway_id=gateway_id, virtual_gateway_id=params["virtual_gateway_id"] + ) if not resp["directConnectGatewayAssociations"]: # attach the dxgw to the supplied virtual_gateway_id associate_direct_connect_gateway(client, module, gateway_id) @@ -269,26 +270,28 @@ def ensure_present(client, module): resp = check_dxgw_association(client, module, gateway_id=gateway_id) if resp["directConnectGatewayAssociations"]: - for association in resp['directConnectGatewayAssociations']: - if association['associationState'] not in ['disassociating', 'disassociated']: + for association in resp["directConnectGatewayAssociations"]: + if association["associationState"] not in ["disassociating", "disassociated"]: delete_association( client, module, gateway_id=gateway_id, - virtual_gateway_id=association['virtualGatewayId']) + virtual_gateway_id=association["virtualGatewayId"], + ) else: # create a new dxgw new_dxgw = create_dx_gateway(client, module) changed = True - gateway_id = new_dxgw['directConnectGateway']['directConnectGatewayId'] + gateway_id = new_dxgw["directConnectGateway"]["directConnectGatewayId"] # if a vpc-id was supplied, attempt to attach it to the dxgw - if params['virtual_gateway_id']: + if params["virtual_gateway_id"]: associate_direct_connect_gateway(client, module, gateway_id) - resp = check_dxgw_association(client, - module, - gateway_id=gateway_id - ) + resp = check_dxgw_association( + client, + module, + gateway_id=gateway_id, + ) if resp["directConnectGatewayAssociations"]: changed = True @@ -302,23 +305,23 @@ def ensure_absent(client, module): changed = False result = dict() - dx_gateway_id = module.params.get('direct_connect_gateway_id') + dx_gateway_id = module.params.get("direct_connect_gateway_id") existing_dxgw = find_dx_gateway(client, module, dx_gateway_id) if existing_dxgw is not None: - resp = check_dxgw_association(client, module, - gateway_id=dx_gateway_id) + resp = check_dxgw_association(client, module, gateway_id=dx_gateway_id) if resp["directConnectGatewayAssociations"]: - for association in resp['directConnectGatewayAssociations']: - if association['associationState'] not in ['disassociating', 'disassociated']: - delete_association(client, module, - gateway_id=dx_gateway_id, - virtual_gateway_id=association['virtualGatewayId']) + for association in resp["directConnectGatewayAssociations"]: + if association["associationState"] not in ["disassociating", "disassociated"]: + delete_association( + client, + module, + gateway_id=dx_gateway_id, + virtual_gateway_id=association["virtualGatewayId"], + ) # wait for deleting association - timeout = time.time() + module.params.get('wait_timeout') + timeout = time.time() + module.params.get("wait_timeout") while time.time() < timeout: - resp = check_dxgw_association(client, - module, - gateway_id=dx_gateway_id) + resp = check_dxgw_association(client, module, gateway_id=dx_gateway_id) if resp["directConnectGatewayAssociations"] != []: time.sleep(15) else: @@ -326,43 +329,44 @@ def ensure_absent(client, module): try: resp = client.delete_direct_connect_gateway( - directConnectGatewayId=dx_gateway_id + directConnectGatewayId=dx_gateway_id, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to delete gateway") -
result = resp['directConnectGateway'] + result = resp["directConnectGateway"] return changed def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), name=dict(), amazon_asn=dict(), virtual_gateway_id=dict(), direct_connect_gateway_id=dict(), - wait_timeout=dict(type='int', default=320), + wait_timeout=dict(type="int", default=320), + ) + required_if = [("state", "present", ["name", "amazon_asn"]), ("state", "absent", ["direct_connect_gateway_id"])] + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=required_if, ) - required_if = [('state', 'present', ['name', 'amazon_asn']), - ('state', 'absent', ['direct_connect_gateway_id'])] - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=required_if) - state = module.params.get('state') + state = module.params.get("state") try: - client = module.client('directconnect') + client = module.client("directconnect") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if state == 'present': + if state == "present": (changed, results) = ensure_present(client, module) - elif state == 'absent': + elif state == "absent": changed = ensure_absent(client, module) results = {} module.exit_json(changed=changed, **camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py b/ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py index cc7122712..99224fee0 100644 --- a/ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py +++ b/ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: directconnect_link_aggregation_group version_added: 1.0.0 @@ -17,10 +15,6 @@ description: The usage did not change. author: - "Sloane Hertel (@s-hertel)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: state: description: @@ -81,9 +75,13 @@ options: - The duration in seconds to wait if I(wait=true). default: 120 type: int -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = """ +EXAMPLES = r""" # create a Direct Connect connection - community.aws.directconnect_link_aggregation_group: @@ -93,7 +91,7 @@ EXAMPLES = """ bandwidth: 1Gbps """ -RETURN = """ +RETURN = r""" changed: type: str description: Whether or not the LAG has changed. 
@@ -163,8 +161,8 @@ region: returned: when I(state=present) """ -import traceback import time +import traceback try: import botocore @@ -173,13 +171,13 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def lag_status(client, lag_id): @@ -187,8 +185,8 @@ def lag_status(client, lag_id): def lag_exists(client, lag_id=None, lag_name=None, verify=True): - """ If verify=True, returns the LAG ID or None - If verify=False, returns the LAG's data (or an empty dict) + """If verify=True, returns the LAG ID or None + If verify=False, returns the LAG's data (or an empty dict) """ try: if lag_id: @@ -202,26 +200,24 @@ def lag_exists(client, lag_id=None, lag_name=None, verify=True): return {} else: failed_op = "Failed to describe DirectConnect link aggregation groups." - raise DirectConnectError(msg=failed_op, - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError(msg=failed_op, last_traceback=traceback.format_exc(), exception=e) match = [] # List of LAG IDs that are exact matches lag = [] # List of LAG data that are exact matches # look for matching connections - if len(response.get('lags', [])) == 1 and lag_id: - if response['lags'][0]['lagState'] != 'deleted': - match.append(response['lags'][0]['lagId']) - lag.append(response['lags'][0]) + if len(response.get("lags", [])) == 1 and lag_id: + if response["lags"][0]["lagState"] != "deleted": + match.append(response["lags"][0]["lagId"]) + lag.append(response["lags"][0]) else: - for each in response.get('lags', []): - if each['lagState'] != 'deleted': + for each in response.get("lags", []): + if each["lagState"] != "deleted": if not lag_id: - if lag_name == each['lagName']: - match.append(each['lagId']) + if lag_name == each["lagName"]: + match.append(each["lagId"]) else: - match.append(each['lagId']) + match.append(each["lagId"]) # verifying if the connections exists; if true, return connection identifier, otherwise return False if verify and len(match) == 1: @@ -239,36 +235,41 @@ def lag_exists(client, lag_id=None, lag_name=None, verify=True): def create_lag(client, num_connections, location, bandwidth, name, connection_id): if not name: - raise DirectConnectError(msg="Failed to create a Direct Connect link aggregation group: name required.", - last_traceback=None, - exception="") - - parameters = dict(numberOfConnections=num_connections, - location=location, - connectionsBandwidth=bandwidth, - lagName=name) + raise DirectConnectError( + msg="Failed to create a Direct Connect link aggregation group: name required.", + last_traceback=None, + exception="", + ) + + parameters = dict( + numberOfConnections=num_connections, location=location, connectionsBandwidth=bandwidth, lagName=name + ) if 
connection_id: parameters.update(connectionId=connection_id) try: lag = client.create_lag(**parameters) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to create DirectConnect link aggregation group {0}".format(name), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=f"Failed to create DirectConnect link aggregation group {name}", + last_traceback=traceback.format_exc(), + exception=e, + ) - return lag['lagId'] + return lag["lagId"] def delete_lag(client, lag_id): try: client.delete_lag(lagId=lag_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=f"Failed to delete Direct Connect link aggregation group {lag_id}.", + last_traceback=traceback.format_exc(), + exception=e, + ) -@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException']) +@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0, catch_extra_error_codes=["DirectConnectClientException"]) def _update_lag(client, lag_id, lag_name, min_links): params = {} if min_links: @@ -284,10 +285,9 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_ if min_links and min_links > num_connections: raise DirectConnectError( - msg="The number of connections {0} must be greater than the minimum number of links " - "{1} to update the LAG {2}".format(num_connections, min_links, lag_id), + msg=f"The number of connections {num_connections} must be greater than the minimum number of links {min_links} to update the LAG {lag_id}", last_traceback=None, - exception=None + exception=None, ) while True: @@ -296,27 +296,29 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_ except botocore.exceptions.ClientError as e: if wait and time.time() - start <= wait_timeout: continue - msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id) - if "MinimumLinks cannot be set higher than the number of connections" in e.response['Error']['Message']: - msg += "Unable to set the min number of links to {0} while the LAG connections are being requested".format(min_links) - raise DirectConnectError(msg=msg, - last_traceback=traceback.format_exc(), - exception=e) + msg = f"Failed to update Direct Connect link aggregation group {lag_id}." + if "MinimumLinks cannot be set higher than the number of connections" in e.response["Error"]["Message"]: + msg += f"Unable to set the min number of links to {min_links} while the LAG connections are being requested" + raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) else: break def lag_changed(current_status, name, min_links): - """ Determines if a modifiable link aggregation group attribute has been modified. 
""" - return (name and name != current_status['lagName']) or (min_links and min_links != current_status['minimumLinks']) + """Determines if a modifiable link aggregation group attribute has been modified.""" + return (name and name != current_status["lagName"]) or (min_links and min_links != current_status["minimumLinks"]) -def ensure_present(client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout): +def ensure_present( + client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout +): exists = lag_exists(client, lag_id, lag_name) if not exists and lag_id: - raise DirectConnectError(msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id), - last_traceback=None, - exception="") + raise DirectConnectError( + msg=f"The Direct Connect link aggregation group {lag_id} does not exist.", + last_traceback=None, + exception="", + ) # the connection is found; get the latest state and see if it needs to be updated if exists: @@ -338,27 +340,31 @@ def describe_virtual_interfaces(client, lag_id): try: response = client.describe_virtual_interfaces(connectionId=lag_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id), - last_traceback=traceback.format_exc(), - exception=e) - return response.get('virtualInterfaces', []) + raise DirectConnectError( + msg=f"Failed to describe any virtual interfaces associated with LAG: {lag_id}", + last_traceback=traceback.format_exc(), + exception=e, + ) + return response.get("virtualInterfaces", []) def get_connections_and_virtual_interfaces(client, lag_id): virtual_interfaces = describe_virtual_interfaces(client, lag_id) - connections = lag_status(client, lag_id=lag_id).get('connections', []) + connections = lag_status(client, lag_id=lag_id).get("connections", []) return virtual_interfaces, connections def disassociate_vis(client, lag_id, virtual_interfaces): for vi in virtual_interfaces: - delete_virtual_interface(client, vi['virtualInterfaceId']) + delete_virtual_interface(client, vi["virtualInterfaceId"]) try: - response = client.delete_virtual_interface(virtualInterfaceId=vi['virtualInterfaceId']) + response = client.delete_virtual_interface(virtualInterfaceId=vi["virtualInterfaceId"]) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=f"Could not delete virtual interface {vi} to delete link aggregation group {lag_id}.", + last_traceback=traceback.format_exc(), + exception=e, + ) def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout): @@ -372,32 +378,41 @@ def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassocia virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id) # If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete - if any((latest_status['minimumLinks'], virtual_interfaces, connections)) and not force_delete: - raise DirectConnectError(msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. 
" - "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). " - "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True " - "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id), - last_traceback=None, - exception=None) + if any((latest_status["minimumLinks"], virtual_interfaces, connections)) and not force_delete: + raise DirectConnectError( + msg=( + "There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG" + f" {lag_id}. To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces" + " they will be deleted). Optionally, to ensure hosted connections are deleted after disassociation use" + " delete_with_disassociation: True and wait: True (as Virtual Interfaces may take a few moments to" + " delete)" + ), + last_traceback=None, + exception=None, + ) # update min_links to be 0 so we can remove the LAG update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout) # if virtual_interfaces and not delete_vi_with_disassociation: Raise failure; can't delete while vi attached for connection in connections: - disassociate_connection_and_lag(client, connection['connectionId'], lag_id) + disassociate_connection_and_lag(client, connection["connectionId"], lag_id) if delete_with_disassociation: - delete_connection(client, connection['connectionId']) + delete_connection(client, connection["connectionId"]) for vi in virtual_interfaces: - delete_virtual_interface(client, vi['virtualInterfaceId']) + delete_virtual_interface(client, vi["virtualInterfaceId"]) start_time = time.time() while True: try: delete_lag(client, lag_id) except DirectConnectError as e: - if ('until its Virtual Interfaces are deleted' in e.exception) and (time.time() - start_time < wait_timeout) and wait: + if ( + ("until its Virtual Interfaces are deleted" in e.exception) + and (time.time() - start_time < wait_timeout) + and wait + ): continue else: return True @@ -405,54 +420,58 @@ def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassocia def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(), link_aggregation_group_id=dict(), - num_connections=dict(type='int'), - min_links=dict(type='int'), + num_connections=dict(type="int"), + min_links=dict(type="int"), location=dict(), bandwidth=dict(), connection_id=dict(), - delete_with_disassociation=dict(type='bool', default=False), - force_delete=dict(type='bool', default=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=120), + delete_with_disassociation=dict(type="bool", default=False), + force_delete=dict(type="bool", default=False), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=120), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_one_of=[('link_aggregation_group_id', 'name')], - required_if=[('state', 'present', ('location', 'bandwidth'))], + required_one_of=[("link_aggregation_group_id", "name")], + required_if=[("state", "present", ("location", "bandwidth"))], ) try: - connection = module.client('directconnect') + connection = module.client("directconnect") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to 
connect to AWS") - state = module.params.get('state') + state = module.params.get("state") response = {} try: - if state == 'present': - changed, lag_id = ensure_present(connection, - num_connections=module.params.get("num_connections"), - lag_id=module.params.get("link_aggregation_group_id"), - lag_name=module.params.get("name"), - location=module.params.get("location"), - bandwidth=module.params.get("bandwidth"), - connection_id=module.params.get("connection_id"), - min_links=module.params.get("min_links"), - wait=module.params.get("wait"), - wait_timeout=module.params.get("wait_timeout")) + if state == "present": + changed, lag_id = ensure_present( + connection, + num_connections=module.params.get("num_connections"), + lag_id=module.params.get("link_aggregation_group_id"), + lag_name=module.params.get("name"), + location=module.params.get("location"), + bandwidth=module.params.get("bandwidth"), + connection_id=module.params.get("connection_id"), + min_links=module.params.get("min_links"), + wait=module.params.get("wait"), + wait_timeout=module.params.get("wait_timeout"), + ) response = lag_status(connection, lag_id) elif state == "absent": - changed = ensure_absent(connection, - lag_id=module.params.get("link_aggregation_group_id"), - lag_name=module.params.get("name"), - force_delete=module.params.get("force_delete"), - delete_with_disassociation=module.params.get("delete_with_disassociation"), - wait=module.params.get('wait'), - wait_timeout=module.params.get('wait_timeout')) + changed = ensure_absent( + connection, + lag_id=module.params.get("link_aggregation_group_id"), + lag_name=module.params.get("name"), + force_delete=module.params.get("force_delete"), + delete_with_disassociation=module.params.get("delete_with_disassociation"), + wait=module.params.get("wait"), + wait_timeout=module.params.get("wait_timeout"), + ) except DirectConnectError as e: if e.last_traceback: module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception)) @@ -462,5 +481,5 @@ def main(): module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py b/ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py index 059cd7425..da76d5737 100644 --- a/ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py +++ b/ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: directconnect_virtual_interface version_added: 1.0.0 @@ -86,12 +84,12 @@ options: - The virtual interface ID. type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -RETURN = r''' +RETURN = r""" address_family: description: The address family for the BGP peer. 
returned: always @@ -228,9 +226,9 @@ vlan: returned: always type: int sample: 100 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" --- - name: create an association between a LAG and connection community.aws.directconnect_virtual_interface: @@ -244,81 +242,87 @@ EXAMPLES = r''' state: absent connection_id: dxcon-XXXXXXXX virtual_interface_id: dxv-XXXXXXXX - -''' +""" import traceback try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: # handled by AnsibleAWSModule pass from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def try_except_ClientError(failure_msg): - ''' - Wrapper for boto3 calls that uses AWSRetry and handles exceptions - ''' + """ + Wrapper for boto3 calls that uses AWSRetry and handles exceptions + """ + def wrapper(f): def run_func(*args, **kwargs): try: - result = AWSRetry.jittered_backoff(retries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs) + result = AWSRetry.jittered_backoff( + retries=8, delay=5, catch_extra_error_codes=["DirectConnectClientException"] + )(f)(*args, **kwargs) except (ClientError, BotoCoreError) as e: raise DirectConnectError(failure_msg, traceback.format_exc(), e) return result + return run_func + return wrapper def find_unique_vi(client, connection_id, virtual_interface_id, name): - ''' - Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found. - If multiple matches are found False is returned. If no matches are found None is returned. - ''' + """ + Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found. + If multiple matches are found False is returned. If no matches are found None is returned. + """ # Get the virtual interfaces, filtering by the ID if provided. vi_params = {} if virtual_interface_id: - vi_params = {'virtualInterfaceId': virtual_interface_id} + vi_params = {"virtualInterfaceId": virtual_interface_id} - virtual_interfaces = try_except_ClientError( - failure_msg="Failed to describe virtual interface")( - client.describe_virtual_interfaces)(**vi_params).get('virtualInterfaces') + virtual_interfaces = try_except_ClientError(failure_msg="Failed to describe virtual interface")( + client.describe_virtual_interfaces + )(**vi_params).get("virtualInterfaces") # Remove deleting/deleted matches from the results. 
- virtual_interfaces = [vi for vi in virtual_interfaces if vi['virtualInterfaceState'] not in ('deleting', 'deleted')] + virtual_interfaces = [vi for vi in virtual_interfaces if vi["virtualInterfaceState"] not in ("deleting", "deleted")] matching_virtual_interfaces = filter_virtual_interfaces(virtual_interfaces, name, connection_id) return exact_match(matching_virtual_interfaces) def exact_match(virtual_interfaces): - ''' - Returns the virtual interface ID if one was found, - None if the virtual interface ID needs to be created, - False if an exact match was not found - ''' + """ + Returns the virtual interface ID if one was found, + None if the virtual interface ID needs to be created, + False if an exact match was not found + """ if not virtual_interfaces: return None if len(virtual_interfaces) == 1: - return virtual_interfaces[0]['virtualInterfaceId'] + return virtual_interfaces[0]["virtualInterfaceId"] else: return False def filter_virtual_interfaces(virtual_interfaces, name, connection_id): - ''' - Filters the available virtual interfaces to try to find a unique match - ''' + """ + Filters the available virtual interfaces to try to find a unique match + """ # Filter by name if provided. if name: matching_by_name = find_virtual_interface_by_name(virtual_interfaces, name) @@ -339,52 +343,56 @@ def filter_virtual_interfaces(virtual_interfaces, name, connection_id): def find_virtual_interface_by_connection_id(virtual_interfaces, connection_id): - ''' - Return virtual interfaces that have the connection_id associated - ''' - return [vi for vi in virtual_interfaces if vi['connectionId'] == connection_id] + """ + Return virtual interfaces that have the connection_id associated + """ + return [vi for vi in virtual_interfaces if vi["connectionId"] == connection_id] def find_virtual_interface_by_name(virtual_interfaces, name): - ''' - Return virtual interfaces that match the provided name - ''' - return [vi for vi in virtual_interfaces if vi['virtualInterfaceName'] == name] + """ + Return virtual interfaces that match the provided name + """ + return [vi for vi in virtual_interfaces if vi["virtualInterfaceName"] == name] def vi_state(client, virtual_interface_id): - ''' - Returns the state of the virtual interface. - ''' - err_msg = "Failed to describe virtual interface: {0}".format(virtual_interface_id) - vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)(virtualInterfaceId=virtual_interface_id) - return vi['virtualInterfaces'][0] + """ + Returns the state of the virtual interface. 
+ """ + err_msg = f"Failed to describe virtual interface: {virtual_interface_id}" + vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)( + virtualInterfaceId=virtual_interface_id + ) + return vi["virtualInterfaces"][0] def assemble_params_for_creating_vi(params): - ''' - Returns kwargs to use in the call to create the virtual interface - - Params for public virtual interfaces: - virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr - Params for private virtual interfaces: - virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId - ''' - - public = params['public'] - name = params['name'] - vlan = params['vlan'] - bgp_asn = params['bgp_asn'] - auth_key = params['authentication_key'] - amazon_addr = params['amazon_address'] - customer_addr = params['customer_address'] - family_addr = params['address_type'] - cidr = params['cidr'] - virtual_gateway_id = params['virtual_gateway_id'] - direct_connect_gateway_id = params['direct_connect_gateway_id'] + """ + Returns kwargs to use in the call to create the virtual interface + + Params for public virtual interfaces: + virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr + Params for private virtual interfaces: + virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId + """ + + public = params["public"] + name = params["name"] + vlan = params["vlan"] + bgp_asn = params["bgp_asn"] + auth_key = params["authentication_key"] + amazon_addr = params["amazon_address"] + customer_addr = params["customer_address"] + family_addr = params["address_type"] + cidr = params["cidr"] + virtual_gateway_id = params["virtual_gateway_id"] + direct_connect_gateway_id = params["direct_connect_gateway_id"] parameters = dict(virtualInterfaceName=name, vlan=vlan, asn=bgp_asn) - opt_params = dict(authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr) + opt_params = dict( + authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr + ) for name, value in opt_params.items(): if value: @@ -392,68 +400,74 @@ def assemble_params_for_creating_vi(params): # virtual interface type specific parameters if public and cidr: - parameters['routeFilterPrefixes'] = [{'cidr': c} for c in cidr] + parameters["routeFilterPrefixes"] = [{"cidr": c} for c in cidr] if not public: if virtual_gateway_id: - parameters['virtualGatewayId'] = virtual_gateway_id + parameters["virtualGatewayId"] = virtual_gateway_id elif direct_connect_gateway_id: - parameters['directConnectGatewayId'] = direct_connect_gateway_id + parameters["directConnectGatewayId"] = direct_connect_gateway_id return parameters def create_vi(client, public, associated_id, creation_params): - ''' - :param public: a boolean - :param associated_id: a link aggregation group ID or connection ID to associate - with the virtual interface. - :param creation_params: a dict of parameters to use in the AWS SDK call - :return The ID of the created virtual interface - ''' + """ + :param public: a boolean + :param associated_id: a link aggregation group ID or connection ID to associate + with the virtual interface. 
+ :param creation_params: a dict of parameters to use in the AWS SDK call + :return The ID of the created virtual interface + """ err_msg = "Failed to create virtual interface" if public: - vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)(connectionId=associated_id, - newPublicVirtualInterface=creation_params) + vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)( + connectionId=associated_id, newPublicVirtualInterface=creation_params + ) else: - vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)(connectionId=associated_id, - newPrivateVirtualInterface=creation_params) - return vi['virtualInterfaceId'] + vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)( + connectionId=associated_id, newPrivateVirtualInterface=creation_params + ) + return vi["virtualInterfaceId"] def modify_vi(client, virtual_interface_id, connection_id): - ''' - Associate a new connection ID - ''' - err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id) - try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)(virtualInterfaceId=virtual_interface_id, - connectionId=connection_id) + """ + Associate a new connection ID + """ + err_msg = f"Unable to associate {connection_id} with virtual interface {virtual_interface_id}" + try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)( + virtualInterfaceId=virtual_interface_id, connectionId=connection_id + ) def needs_modification(client, virtual_interface_id, connection_id): - ''' - Determine if the associated connection ID needs to be updated - ''' - return vi_state(client, virtual_interface_id).get('connectionId') != connection_id + """ + Determine if the associated connection ID needs to be updated + """ + return vi_state(client, virtual_interface_id).get("connectionId") != connection_id def ensure_state(connection, module): changed = False - state = module.params['state'] - connection_id = module.params['id_to_associate'] - public = module.params['public'] - name = module.params['name'] + state = module.params["state"] + connection_id = module.params["id_to_associate"] + public = module.params["public"] + name = module.params["name"] - virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get('virtual_interface_id'), name) + virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get("virtual_interface_id"), name) if virtual_interface_id is False: - module.fail_json(msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, " - "and connection_id options if applicable to find a unique match.") - - if state == 'present': + module.fail_json( + msg=( + "Multiple virtual interfaces were found. Use the virtual_interface_id, name, " + "and connection_id options if applicable to find a unique match." 
+ ) + ) - if not virtual_interface_id and module.params['virtual_interface_id']: - module.fail_json(msg="The virtual interface {0} does not exist.".format(module.params['virtual_interface_id'])) + if state == "present": + if not virtual_interface_id and module.params["virtual_interface_id"]: + module.fail_json(msg=f"The virtual interface {module.params['virtual_interface_id']} does not exist.") elif not virtual_interface_id: assembled_params = assemble_params_for_creating_vi(module.params) @@ -478,31 +492,35 @@ def ensure_state(connection, module): def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), - id_to_associate=dict(required=True, aliases=['link_aggregation_group_id', 'connection_id']), - public=dict(type='bool'), + state=dict(required=True, choices=["present", "absent"]), + id_to_associate=dict(required=True, aliases=["link_aggregation_group_id", "connection_id"]), + public=dict(type="bool"), name=dict(), - vlan=dict(type='int', default=100), - bgp_asn=dict(type='int', default=65000), + vlan=dict(type="int", default=100), + bgp_asn=dict(type="int", default=65000), authentication_key=dict(no_log=True), amazon_address=dict(), customer_address=dict(), address_type=dict(), - cidr=dict(type='list', elements='str'), + cidr=dict(type="list", elements="str"), virtual_gateway_id=dict(), direct_connect_gateway_id=dict(), - virtual_interface_id=dict() + virtual_interface_id=dict(), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_one_of=[['virtual_interface_id', 'name']], - required_if=[['state', 'present', ['public']], - ['public', True, ['amazon_address']], - ['public', True, ['customer_address']], - ['public', True, ['cidr']]], - mutually_exclusive=[['virtual_gateway_id', 'direct_connect_gateway_id']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_one_of=[["virtual_interface_id", "name"]], + required_if=[ + ["state", "present", ["public"]], + ["public", True, ["amazon_address"]], + ["public", True, ["customer_address"]], + ["public", True, ["cidr"]], + ], + mutually_exclusive=[["virtual_gateway_id", "direct_connect_gateway_id"]], + ) - connection = module.client('directconnect') + connection = module.client("directconnect") try: changed, latest_state = ensure_state(connection, module) @@ -515,5 +533,5 @@ def main(): module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/dms_endpoint.py b/ansible_collections/community/aws/plugins/modules/dms_endpoint.py index fb899d669..f67a1263e 100644 --- a/ansible_collections/community/aws/plugins/modules/dms_endpoint.py +++ b/ansible_collections/community/aws/plugins/modules/dms_endpoint.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: dms_endpoint version_added: 1.0.0 @@ -143,13 +141,13 @@ options: author: - "Rui Moreira (@ruimoreira)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 - amazon.aws.tags 
-''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details - name: Endpoint Creation community.aws.dms_endpoint: @@ -164,9 +162,9 @@ EXAMPLES = ''' databasename: 'testdb' sslmode: none wait: false -''' +""" -RETURN = ''' +RETURN = r""" endpoint: description: - A description of the DMS endpoint. @@ -325,7 +323,7 @@ endpoint: - Additional settings for Redis endpoints. type: dict returned: when the I(endpoint_type) is C(redshift) -''' +""" try: import botocore @@ -334,20 +332,21 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + backoff_params = dict(retries=5, delay=1, backoff=1.5) @AWSRetry.jittered_backoff(**backoff_params) def dms_describe_tags(connection, **params): - """ checks if the endpoint exists """ - tags = connection.list_tags_for_resource(**params).get('TagList', []) + """checks if the endpoint exists""" + tags = connection.list_tags_for_resource(**params).get("TagList", []) return boto3_tag_list_to_ansible_dict(tags) @@ -355,15 +354,14 @@ def dms_describe_tags(connection, **params): def dms_describe_endpoints(connection, **params): try: endpoints = connection.describe_endpoints(**params) - except is_boto3_error_code('ResourceNotFoundFault'): + except is_boto3_error_code("ResourceNotFoundFault"): return None - return endpoints.get('Endpoints', None) + return endpoints.get("Endpoints", None) def describe_endpoint(connection, endpoint_identifier): - """ checks if the endpoint exists """ - endpoint_filter = dict(Name='endpoint-id', - Values=[endpoint_identifier]) + """checks if the endpoint exists""" + endpoint_filter = dict(Name="endpoint-id", Values=[endpoint_identifier]) try: endpoints = dms_describe_endpoints(connection, Filters=[endpoint_filter]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -374,8 +372,8 @@ def describe_endpoint(connection, endpoint_identifier): endpoint = endpoints[0] try: - tags = dms_describe_tags(connection, ResourceArn=endpoint['EndpointArn']) - endpoint['tags'] = tags + tags = dms_describe_tags(connection, ResourceArn=endpoint["EndpointArn"]) + endpoint["tags"] = tags except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe the DMS endpoint tags") return endpoint @@ -384,7 +382,7 @@ def describe_endpoint(connection, endpoint_identifier): @AWSRetry.jittered_backoff(**backoff_params) def dms_delete_endpoint(client, **params): """deletes the DMS endpoint based on the EndpointArn""" - if module.params.get('wait'): + if module.params.get("wait"): return delete_dms_endpoint(client) else: return 
client.delete_endpoint(**params) @@ -392,19 +390,19 @@ def dms_delete_endpoint(client, **params): @AWSRetry.jittered_backoff(**backoff_params) def dms_create_endpoint(client, **params): - """ creates the DMS endpoint""" + """creates the DMS endpoint""" return client.create_endpoint(**params) @AWSRetry.jittered_backoff(**backoff_params) def dms_modify_endpoint(client, **params): - """ updates the endpoint""" + """updates the endpoint""" return client.modify_endpoint(**params) @AWSRetry.jittered_backoff(**backoff_params) def get_endpoint_deleted_waiter(client): - return client.get_waiter('endpoint_deleted') + return client.get_waiter("endpoint_deleted") @AWSRetry.jittered_backoff(**backoff_params) @@ -418,32 +416,22 @@ def dms_add_tags(client, **params): def endpoint_exists(endpoint): - """ Returns boolean based on the existence of the endpoint + """Returns boolean based on the existence of the endpoint :param endpoint: dict containing the described endpoint :return: bool """ - return bool(len(endpoint['Endpoints'])) + return bool(len(endpoint["Endpoints"])) def delete_dms_endpoint(connection, endpoint_arn): try: - delete_arn = dict( - EndpointArn=endpoint_arn - ) - if module.params.get('wait'): - + delete_arn = dict(EndpointArn=endpoint_arn) + if module.params.get("wait"): delete_output = connection.delete_endpoint(**delete_arn) delete_waiter = get_endpoint_deleted_waiter(connection) delete_waiter.wait( - Filters=[{ - 'Name': 'endpoint-arn', - 'Values': [endpoint_arn] - - }], - WaiterConfig={ - 'Delay': module.params.get('timeout'), - 'MaxAttempts': module.params.get('retries') - } + Filters=[{"Name": "endpoint-arn", "Values": [endpoint_arn]}], + WaiterConfig={"Delay": module.params.get("timeout"), "MaxAttempts": module.params.get("retries")}, ) return delete_output else: @@ -458,71 +446,62 @@ def create_module_params(): :return: dict """ endpoint_parameters = dict( - EndpointIdentifier=module.params.get('endpointidentifier'), - EndpointType=module.params.get('endpointtype'), - EngineName=module.params.get('enginename'), - Username=module.params.get('username'), - Password=module.params.get('password'), - ServerName=module.params.get('servername'), - Port=module.params.get('port'), - DatabaseName=module.params.get('databasename'), - SslMode=module.params.get('sslmode') + EndpointIdentifier=module.params.get("endpointidentifier"), + EndpointType=module.params.get("endpointtype"), + EngineName=module.params.get("enginename"), + Username=module.params.get("username"), + Password=module.params.get("password"), + ServerName=module.params.get("servername"), + Port=module.params.get("port"), + DatabaseName=module.params.get("databasename"), + SslMode=module.params.get("sslmode"), ) - if module.params.get('EndpointArn'): - endpoint_parameters['EndpointArn'] = module.params.get('EndpointArn') - if module.params.get('certificatearn'): - endpoint_parameters['CertificateArn'] = \ - module.params.get('certificatearn') + if module.params.get("EndpointArn"): + endpoint_parameters["EndpointArn"] = module.params.get("EndpointArn") + if module.params.get("certificatearn"): + endpoint_parameters["CertificateArn"] = module.params.get("certificatearn") - if module.params.get('dmstransfersettings'): - endpoint_parameters['DmsTransferSettings'] = \ - module.params.get('dmstransfersettings') + if module.params.get("dmstransfersettings"): + endpoint_parameters["DmsTransferSettings"] = module.params.get("dmstransfersettings") - if module.params.get('extraconnectionattributes'): - 
endpoint_parameters['ExtraConnectionAttributes'] =\ - module.params.get('extraconnectionattributes') + if module.params.get("extraconnectionattributes"): + endpoint_parameters["ExtraConnectionAttributes"] = module.params.get("extraconnectionattributes") - if module.params.get('kmskeyid'): - endpoint_parameters['KmsKeyId'] = module.params.get('kmskeyid') + if module.params.get("kmskeyid"): + endpoint_parameters["KmsKeyId"] = module.params.get("kmskeyid") - if module.params.get('tags'): - endpoint_parameters['Tags'] = module.params.get('tags') + if module.params.get("tags"): + endpoint_parameters["Tags"] = module.params.get("tags") - if module.params.get('serviceaccessrolearn'): - endpoint_parameters['ServiceAccessRoleArn'] = \ - module.params.get('serviceaccessrolearn') + if module.params.get("serviceaccessrolearn"): + endpoint_parameters["ServiceAccessRoleArn"] = module.params.get("serviceaccessrolearn") - if module.params.get('externaltabledefinition'): - endpoint_parameters['ExternalTableDefinition'] = \ - module.params.get('externaltabledefinition') + if module.params.get("externaltabledefinition"): + endpoint_parameters["ExternalTableDefinition"] = module.params.get("externaltabledefinition") - if module.params.get('dynamodbsettings'): - endpoint_parameters['DynamoDbSettings'] = \ - module.params.get('dynamodbsettings') + if module.params.get("dynamodbsettings"): + endpoint_parameters["DynamoDbSettings"] = module.params.get("dynamodbsettings") - if module.params.get('s3settings'): - endpoint_parameters['S3Settings'] = module.params.get('s3settings') + if module.params.get("s3settings"): + endpoint_parameters["S3Settings"] = module.params.get("s3settings") - if module.params.get('mongodbsettings'): - endpoint_parameters['MongoDbSettings'] = \ - module.params.get('mongodbsettings') + if module.params.get("mongodbsettings"): + endpoint_parameters["MongoDbSettings"] = module.params.get("mongodbsettings") - if module.params.get('kinesissettings'): - endpoint_parameters['KinesisSettings'] = \ - module.params.get('kinesissettings') + if module.params.get("kinesissettings"): + endpoint_parameters["KinesisSettings"] = module.params.get("kinesissettings") - if module.params.get('elasticsearchsettings'): - endpoint_parameters['ElasticsearchSettings'] = \ - module.params.get('elasticsearchsettings') + if module.params.get("elasticsearchsettings"): + endpoint_parameters["ElasticsearchSettings"] = module.params.get("elasticsearchsettings") - if module.params.get('wait'): - endpoint_parameters['wait'] = module.boolean(module.params.get('wait')) + if module.params.get("wait"): + endpoint_parameters["wait"] = module.boolean(module.params.get("wait")) - if module.params.get('timeout'): - endpoint_parameters['timeout'] = module.params.get('timeout') + if module.params.get("timeout"): + endpoint_parameters["timeout"] = module.params.get("timeout") - if module.params.get('retries'): - endpoint_parameters['retries'] = module.params.get('retries') + if module.params.get("retries"): + endpoint_parameters["retries"] = module.params.get("retries") return endpoint_parameters @@ -538,14 +517,16 @@ def compare_params(param_described): param_described = dict(param_described) modparams = create_module_params() # modify can't update tags - param_described.pop('Tags', None) - modparams.pop('Tags', None) + param_described.pop("Tags", None) + modparams.pop("Tags", None) changed = False for paramname in modparams: - if paramname == 'Password' or paramname in param_described \ - and param_described[paramname] == 
modparams[paramname] or \ - str(param_described[paramname]).lower() \ - == modparams[paramname]: + if ( + paramname == "Password" + or paramname in param_described + and param_described[paramname] == modparams[paramname] + or str(param_described[paramname]).lower() == modparams[paramname] + ): pass else: changed = True @@ -553,25 +534,24 @@ def compare_params(param_described): def modify_dms_endpoint(connection, endpoint): - arn = endpoint['EndpointArn'] + arn = endpoint["EndpointArn"] try: params = create_module_params() # modify can't update tags - params.pop('Tags', None) + params.pop("Tags", None) return dms_modify_endpoint(connection, EndpointArn=arn, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update DMS endpoint.", params=params) def ensure_tags(connection, endpoint): - desired_tags = module.params.get('tags', None) + desired_tags = module.params.get("tags", None) if desired_tags is None: return False - current_tags = endpoint.get('tags', {}) + current_tags = endpoint.get("tags", {}) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, - module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, module.params.get("purge_tags")) if not tags_to_remove and not tags_to_add: return False @@ -579,7 +559,7 @@ def ensure_tags(connection, endpoint): if module.check_mode: return True - arn = endpoint.get('EndpointArn') + arn = endpoint.get("EndpointArn") try: if tags_to_remove: @@ -609,36 +589,49 @@ def create_dms_endpoint(connection): def main(): argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), + state=dict(choices=["present", "absent"], default="present"), endpointidentifier=dict(required=True), - endpointtype=dict(choices=['source', 'target']), - enginename=dict(choices=['mysql', 'oracle', 'postgres', 'mariadb', - 'aurora', 'redshift', 's3', 'db2', 'azuredb', - 'sybase', 'dynamodb', 'mongodb', 'sqlserver'], - required=False), + endpointtype=dict(choices=["source", "target"]), + enginename=dict( + choices=[ + "mysql", + "oracle", + "postgres", + "mariadb", + "aurora", + "redshift", + "s3", + "db2", + "azuredb", + "sybase", + "dynamodb", + "mongodb", + "sqlserver", + ], + required=False, + ), username=dict(), password=dict(no_log=True), servername=dict(), - port=dict(type='int'), + port=dict(type="int"), databasename=dict(), extraconnectionattributes=dict(), kmskeyid=dict(no_log=False), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), certificatearn=dict(), - sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'], - default='none'), + sslmode=dict(choices=["none", "require", "verify-ca", "verify-full"], default="none"), serviceaccessrolearn=dict(), externaltabledefinition=dict(), - dynamodbsettings=dict(type='dict'), - s3settings=dict(type='dict'), - dmstransfersettings=dict(type='dict'), - mongodbsettings=dict(type='dict'), - kinesissettings=dict(type='dict'), - elasticsearchsettings=dict(type='dict'), - wait=dict(type='bool', default=False), - timeout=dict(type='int'), - retries=dict(type='int') + dynamodbsettings=dict(type="dict"), + s3settings=dict(type="dict"), + dmstransfersettings=dict(type="dict"), + mongodbsettings=dict(type="dict"), + kinesissettings=dict(type="dict"), + elasticsearchsettings=dict(type="dict"), + 
wait=dict(type="bool", default=False), + timeout=dict(type="int"), + retries=dict(type="int"), ) global module module = AnsibleAWSModule( @@ -650,49 +643,48 @@ def main(): ["wait", "True", ["timeout"]], ["wait", "True", ["retries"]], ], - supports_check_mode=False + supports_check_mode=False, ) exit_message = None changed = False - state = module.params.get('state') + state = module.params.get("state") - dmsclient = module.client('dms') - endpoint = describe_endpoint(dmsclient, - module.params.get('endpointidentifier')) - if state == 'present': + dmsclient = module.client("dms") + endpoint = describe_endpoint(dmsclient, module.params.get("endpointidentifier")) + if state == "present": if endpoint: changed |= ensure_tags(dmsclient, endpoint) params_changed = compare_params(endpoint) if params_changed: updated_dms = modify_dms_endpoint(dmsclient, endpoint) exit_message = updated_dms - endpoint = exit_message.get('Endpoint') + endpoint = exit_message.get("Endpoint") changed = True else: exit_message = "Endpoint Already Exists" else: exit_message = create_dms_endpoint(dmsclient) - endpoint = exit_message.get('Endpoint') + endpoint = exit_message.get("Endpoint") changed = True if changed: # modify and create don't return tags - tags = dms_describe_tags(dmsclient, ResourceArn=endpoint['EndpointArn']) - endpoint['tags'] = tags - elif state == 'absent': + tags = dms_describe_tags(dmsclient, ResourceArn=endpoint["EndpointArn"]) + endpoint["tags"] = tags + elif state == "absent": if endpoint: - delete_results = delete_dms_endpoint(dmsclient, endpoint['EndpointArn']) + delete_results = delete_dms_endpoint(dmsclient, endpoint["EndpointArn"]) exit_message = delete_results endpoint = None changed = True else: changed = False - exit_message = 'DMS Endpoint does not exist' + exit_message = "DMS Endpoint does not exist" - endpoint = camel_dict_to_snake_dict(endpoint or {}, ignore_list=['tags']) + endpoint = camel_dict_to_snake_dict(endpoint or {}, ignore_list=["tags"]) module.exit_json(changed=changed, endpoint=endpoint, msg=exit_message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py b/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py index fb5d59613..772a54aa1 100644 --- a/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py +++ b/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: dms_replication_subnet_group version_added: 1.0.0 @@ -43,29 +41,29 @@ options: author: - "Rui Moreira (@ruimoreira)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - community.aws.dms_replication_subnet_group: state: present identifier: "dev-sngroup" description: "Development Subnet Group asdasdas" - subnet_ids: ['subnet-id1','subnet-id2'] -''' + subnet_ids: ['subnet-id1', 'subnet-id2'] +""" -RETURN = ''' # ''' +RETURN = r""" # """ try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from 
ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule backoff_params = dict(retries=5, delay=1, backoff=1.5) @@ -74,16 +72,15 @@ backoff_params = dict(retries=5, delay=1, backoff=1.5) def describe_subnet_group(connection, subnet_group): """checks if instance exists""" try: - subnet_group_filter = dict(Name='replication-subnet-group-id', - Values=[subnet_group]) + subnet_group_filter = dict(Name="replication-subnet-group-id", Values=[subnet_group]) return connection.describe_replication_subnet_groups(Filters=[subnet_group_filter]) except botocore.exceptions.ClientError: - return {'ReplicationSubnetGroups': []} + return {"ReplicationSubnetGroups": []} @AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_create(connection, **params): - """ creates the replication subnet group """ + """creates the replication subnet group""" return connection.create_replication_subnet_group(**params) @@ -94,17 +91,17 @@ def replication_subnet_group_modify(connection, **modify_params): @AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_delete(module, connection): - subnetid = module.params.get('identifier') + subnetid = module.params.get("identifier") delete_parameters = dict(ReplicationSubnetGroupIdentifier=subnetid) return connection.delete_replication_subnet_group(**delete_parameters) def replication_subnet_exists(subnet): - """ Returns boolean based on the existence of the endpoint + """Returns boolean based on the existence of the endpoint :param endpoint: dict containing the described endpoint :return: bool """ - return bool(len(subnet['ReplicationSubnetGroups'])) + return bool(len(subnet["ReplicationSubnetGroups"])) def create_module_params(module): @@ -114,9 +111,9 @@ def create_module_params(module): """ instance_parameters = dict( # ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API - ReplicationSubnetGroupIdentifier=module.params.get('identifier').lower(), - ReplicationSubnetGroupDescription=module.params.get('description'), - SubnetIds=module.params.get('subnet_ids'), + ReplicationSubnetGroupIdentifier=module.params.get("identifier").lower(), + ReplicationSubnetGroupDescription=module.params.get("description"), + SubnetIds=module.params.get("subnet_ids"), ) return instance_parameters @@ -133,19 +130,18 @@ def compare_params(module, param_described): modparams = create_module_params(module) changed = False # need to sanitize values that get returned from the API - if 'VpcId' in param_described.keys(): - param_described.pop('VpcId') - if 'SubnetGroupStatus' in param_described.keys(): - param_described.pop('SubnetGroupStatus') + if "VpcId" in param_described.keys(): + param_described.pop("VpcId") + if "SubnetGroupStatus" in param_described.keys(): + param_described.pop("SubnetGroupStatus") for paramname in modparams.keys(): - if paramname in param_described.keys() and \ - param_described.get(paramname) == modparams[paramname]: + if paramname in param_described.keys() and param_described.get(paramname) == modparams[paramname]: pass - elif paramname == 'SubnetIds': + elif paramname == "SubnetIds": subnets = [] - for subnet in param_described.get('Subnets'): - subnets.append(subnet.get('SubnetIdentifier')) - for modulesubnet in 
@@ -171,23 +167,19 @@ def modify_replication_subnet_group(module, connection):
 
 def main():
     argument_spec = dict(
-        state=dict(type='str', choices=['present', 'absent'], default='present'),
-        identifier=dict(type='str', required=True),
-        description=dict(type='str', required=True),
-        subnet_ids=dict(type='list', elements='str', required=True),
-    )
-    module = AnsibleAWSModule(
-        argument_spec=argument_spec,
-        supports_check_mode=True
+        state=dict(type="str", choices=["present", "absent"], default="present"),
+        identifier=dict(type="str", required=True),
+        description=dict(type="str", required=True),
+        subnet_ids=dict(type="list", elements="str", required=True),
     )
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
 
     exit_message = None
     changed = False
 
-    state = module.params.get('state')
-    dmsclient = module.client('dms')
-    subnet_group = describe_subnet_group(dmsclient,
-                                         module.params.get('identifier'))
-    if state == 'present':
+    state = module.params.get("state")
+    dmsclient = module.client("dms")
+    subnet_group = describe_subnet_group(dmsclient, module.params.get("identifier"))
+    if state == "present":
         if replication_subnet_exists(subnet_group):
             if compare_params(module, subnet_group["ReplicationSubnetGroups"][0]):
                 if not module.check_mode:
@@ -204,7 +196,7 @@ def main():
             else:
                 exit_message = "Check mode enabled"
 
-    elif state == 'absent':
+    elif state == "absent":
         if replication_subnet_exists(subnet_group):
             if not module.check_mode:
                 replication_subnet_group_delete(module, dmsclient)
@@ -221,5 +213,5 @@ def main():
     module.exit_json(changed=changed, msg=exit_message)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/community/aws/plugins/modules/dynamodb_table.py b/ansible_collections/community/aws/plugins/modules/dynamodb_table.py
index 28d334fc9..86ba2f05e 100644
--- a/ansible_collections/community/aws/plugins/modules/dynamodb_table.py
+++ b/ansible_collections/community/aws/plugins/modules/dynamodb_table.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: dynamodb_table
 version_added: 1.0.0
@@ -125,30 +123,31 @@ options:
   table_class:
     description:
     - The class of the table.
-    - Requires at least botocore version 1.23.18.
     choices: ['STANDARD', 'STANDARD_INFREQUENT_ACCESS']
     type: str
     version_added: 3.1.0
   wait_timeout:
     description:
    - How long (in seconds) to wait for creation / update / deletion to complete.
+    - AWS only allows secondary indexes to be updated one at a time; this module will automatically update them
+      in serial, and the timeout will be applied separately for each index.
     aliases: ['wait_for_active_timeout']
-    default: 300
+    default: 900
     type: int
   wait:
     description:
     - When I(wait=True) the module will wait for up to I(wait_timeout) seconds
-      for table creation or deletion to complete before returning.
+      for index updates, table creation or deletion to complete before returning.
default: True type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create dynamo table with hash and range primary key community.aws.dynamodb_table: name: my-table @@ -197,9 +196,9 @@ EXAMPLES = r''' name: my-table region: us-east-1 state: absent -''' +""" -RETURN = r''' +RETURN = r""" table: description: The returned table params from the describe API call. returned: success @@ -243,29 +242,39 @@ table_status: returned: success type: str sample: ACTIVE -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - - -DYNAMO_TYPE_DEFAULT = 'STRING' -INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name'] -INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity'] -INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only'] +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_indexes_active +from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_table_exists +from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_table_not_exists +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + +DYNAMO_TYPE_DEFAULT = "STRING" +INDEX_REQUIRED_OPTIONS = ["name", "type", "hash_key_name"] +INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + [ + "hash_key_type", + "range_key_name", + "range_key_type", + "includes", + "read_capacity", + "write_capacity", +] +INDEX_TYPE_OPTIONS = ["all", "global_all", "global_include", "global_keys_only", "include", "keys_only"] # Map in both directions -DYNAMO_TYPE_MAP_LONG = {'STRING': 'S', 'NUMBER': 'N', 'BINARY': 'B'} +DYNAMO_TYPE_MAP_LONG = {"STRING": "S", "NUMBER": "N", "BINARY": "B"} DYNAMO_TYPE_MAP_SHORT = dict((v, k) for k, v in DYNAMO_TYPE_MAP_LONG.items()) KEY_TYPE_CHOICES = list(DYNAMO_TYPE_MAP_LONG.keys()) @@ -274,58 +283,43 @@ KEY_TYPE_CHOICES = list(DYNAMO_TYPE_MAP_LONG.keys()) # LimitExceededException/ResourceInUseException exceptions at you. 
This can be # pretty slow, so add plenty of retries... @AWSRetry.jittered_backoff( - retries=45, delay=5, max_delay=30, - catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'], + retries=45, + delay=5, + max_delay=30, + catch_extra_error_codes=["ResourceInUseException", "ResourceNotFoundException"], ) def _update_table_with_long_retry(**changes): - return client.update_table( - TableName=module.params.get('name'), - **changes - ) + return client.update_table(TableName=module.params.get("name"), **changes) # ResourceNotFoundException is expected here if the table doesn't exist -@AWSRetry.jittered_backoff(catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["LimitExceededException", "ResourceInUseException"]) def _describe_table(**params): return client.describe_table(**params) def wait_exists(): - table_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') - - delay = min(wait_timeout, 5) - max_attempts = wait_timeout // delay - - try: - waiter = client.get_waiter('table_exists') - waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, - TableName=table_name, - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on table creation') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed while waiting on table creation') + wait_table_exists( + module, + module.params.get("wait_timeout"), + module.params.get("name"), + ) def wait_not_exists(): - table_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') + wait_table_not_exists( + module, + module.params.get("wait_timeout"), + module.params.get("name"), + ) - delay = min(wait_timeout, 5) - max_attempts = wait_timeout // delay - try: - waiter = client.get_waiter('table_not_exists') - waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, - TableName=table_name, - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on table deletion') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed while waiting on table deletion') +def wait_indexes(): + wait_indexes_active( + module, + module.params.get("wait_timeout"), + module.params.get("name"), + ) def _short_type_to_long(short_key): @@ -361,21 +355,21 @@ def _decode_primary_index(current_table): # The schema/attribute definitions are a list of dicts which need the same # treatment as boto3's tag lists schema = boto3_tag_list_to_ansible_dict( - current_table.get('key_schema', []), + current_table.get("key_schema", []), # Map from 'HASH'/'RANGE' to attribute name - tag_name_key_name='key_type', - tag_value_key_name='attribute_name', + tag_name_key_name="key_type", + tag_value_key_name="attribute_name", ) attributes = boto3_tag_list_to_ansible_dict( - current_table.get('attribute_definitions', []), + current_table.get("attribute_definitions", []), # Map from attribute name to 'S'/'N'/'B'. 
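# Worked example (illustrative values): a key_schema of
#   [{"key_type": "HASH", "attribute_name": "id"}, {"key_type": "RANGE", "attribute_name": "created"}]
# decodes to {"HASH": "id", "RANGE": "created"}, and attribute_definitions such as
#   [{"attribute_name": "id", "attribute_type": "S"}, {"attribute_name": "created", "attribute_type": "N"}]
# decode to {"id": "S", "created": "N"}, which the hash/range lookups below rely on.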
- tag_name_key_name='attribute_name', - tag_value_key_name='attribute_type', + tag_name_key_name="attribute_name", + tag_value_key_name="attribute_type", ) - hash_key_name = schema.get('HASH') + hash_key_name = schema.get("HASH") hash_key_type = _short_type_to_long(attributes.get(hash_key_name, None)) - range_key_name = schema.get('RANGE', None) + range_key_name = schema.get("RANGE", None) range_key_type = _short_type_to_long(attributes.get(range_key_name, None)) return dict( @@ -386,56 +380,56 @@ def _decode_primary_index(current_table): ) -def _decode_index(index_data, attributes, type_prefix=''): +def _decode_index(index_data, attributes, type_prefix=""): try: index_map = dict( - name=index_data['index_name'], + name=index_data["index_name"], ) index_data = dict(index_data) - index_data['attribute_definitions'] = attributes + index_data["attribute_definitions"] = attributes index_map.update(_decode_primary_index(index_data)) - throughput = index_data.get('provisioned_throughput', {}) - index_map['provisioned_throughput'] = throughput + throughput = index_data.get("provisioned_throughput", {}) + index_map["provisioned_throughput"] = throughput if throughput: - index_map['read_capacity'] = throughput.get('read_capacity_units') - index_map['write_capacity'] = throughput.get('write_capacity_units') + index_map["read_capacity"] = throughput.get("read_capacity_units") + index_map["write_capacity"] = throughput.get("write_capacity_units") - projection = index_data.get('projection', {}) + projection = index_data.get("projection", {}) if projection: - index_map['type'] = type_prefix + projection.get('projection_type') - index_map['includes'] = projection.get('non_key_attributes', []) + index_map["type"] = type_prefix + projection.get("projection_type") + index_map["includes"] = projection.get("non_key_attributes", []) return index_map except Exception as e: - module.fail_json_aws(e, msg='Decode failure', index_data=index_data) + module.fail_json_aws(e, msg="Decode failure", index_data=index_data) def compatability_results(current_table): if not current_table: return dict() - billing_mode = current_table.get('billing_mode') + billing_mode = current_table.get("billing_mode") primary_indexes = _decode_primary_index(current_table) - hash_key_name = primary_indexes.get('hash_key_name') - hash_key_type = primary_indexes.get('hash_key_type') - range_key_name = primary_indexes.get('range_key_name') - range_key_type = primary_indexes.get('range_key_type') + hash_key_name = primary_indexes.get("hash_key_name") + hash_key_type = primary_indexes.get("hash_key_type") + range_key_name = primary_indexes.get("range_key_name") + range_key_type = primary_indexes.get("range_key_type") indexes = list() - global_indexes = current_table.get('_global_index_map', {}) - local_indexes = current_table.get('_local_index_map', {}) + global_indexes = current_table.get("_global_index_map", {}) + local_indexes = current_table.get("_local_index_map", {}) for index in global_indexes: idx = dict(global_indexes[index]) - idx.pop('provisioned_throughput', None) + idx.pop("provisioned_throughput", None) indexes.append(idx) for index in local_indexes: idx = dict(local_indexes[index]) - idx.pop('provisioned_throughput', None) + idx.pop("provisioned_throughput", None) indexes.append(idx) compat_results = dict( @@ -446,72 +440,78 @@ def compatability_results(current_table): indexes=indexes, billing_mode=billing_mode, region=module.region, - table_name=current_table.get('table_name', None), - 
table_class=current_table.get('table_class_summary', {}).get('table_class', None), - table_status=current_table.get('table_status', None), - tags=current_table.get('tags', {}), + table_name=current_table.get("table_name", None), + table_class=current_table.get("table_class_summary", {}).get("table_class", None), + table_status=current_table.get("table_status", None), + tags=current_table.get("tags", {}), ) if billing_mode == "PROVISIONED": - throughput = current_table.get('provisioned_throughput', {}) - compat_results['read_capacity'] = throughput.get('read_capacity_units', None) - compat_results['write_capacity'] = throughput.get('write_capacity_units', None) + throughput = current_table.get("provisioned_throughput", {}) + compat_results["read_capacity"] = throughput.get("read_capacity_units", None) + compat_results["write_capacity"] = throughput.get("write_capacity_units", None) return compat_results def get_dynamodb_table(): - table_name = module.params.get('name') + table_name = module.params.get("name") try: table = _describe_table(TableName=table_name) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to describe table') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe table") - table = table['Table'] + table = table["Table"] try: - tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table['TableArn'])['Tags'] - except is_boto3_error_code('AccessDeniedException'): - module.warn('Permission denied when listing tags') + tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table["TableArn"])["Tags"] + except is_boto3_error_code("AccessDeniedException"): + module.warn("Permission denied when listing tags") tags = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to list table tags') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to list table tags") tags = boto3_tag_list_to_ansible_dict(tags) table = camel_dict_to_snake_dict(table) # Put some of the values into places people will expect them - table['arn'] = table['table_arn'] - table['name'] = table['table_name'] - table['status'] = table['table_status'] - table['id'] = table['table_id'] - table['size'] = table['table_size_bytes'] - table['tags'] = tags + table["arn"] = table["table_arn"] + table["name"] = table["table_name"] + table["status"] = table["table_status"] + table["id"] = table["table_id"] + table["size"] = table["table_size_bytes"] + table["tags"] = tags - if 'table_class_summary' in table: - table['table_class'] = table['table_class_summary']['table_class'] + if "table_class_summary" in table: + table["table_class"] = table["table_class_summary"]["table_class"] # billing_mode_summary doesn't always seem to be set but is always set for PAY_PER_REQUEST # and when updating the billing_mode - if 'billing_mode_summary' in table: - table['billing_mode'] = table['billing_mode_summary']['billing_mode'] + if "billing_mode_summary" in table: + table["billing_mode"] = table["billing_mode_summary"]["billing_mode"] else: - 
table['billing_mode'] = "PROVISIONED" + table["billing_mode"] = "PROVISIONED" # convert indexes into something we can easily search against - attributes = table['attribute_definitions'] + attributes = table["attribute_definitions"] global_index_map = dict() local_index_map = dict() - for index in table.get('global_secondary_indexes', []): - idx = _decode_index(index, attributes, type_prefix='global_') - global_index_map[idx['name']] = idx - for index in table.get('local_secondary_indexes', []): + for index in table.get("global_secondary_indexes", []): + idx = _decode_index(index, attributes, type_prefix="global_") + global_index_map[idx["name"]] = idx + for index in table.get("local_secondary_indexes", []): idx = _decode_index(index, attributes) - local_index_map[idx['name']] = idx - table['_global_index_map'] = global_index_map - table['_local_index_map'] = local_index_map + local_index_map[idx["name"]] = idx + table["_global_index_map"] = global_index_map + table["_local_index_map"] = local_index_map return table @@ -522,19 +522,19 @@ def _generate_attribute_map(): """ attributes = dict() - for index in (module.params, *module.params.get('indexes')): + for index in (module.params, *module.params.get("indexes")): # run through hash_key_name and range_key_name - for t in ['hash', 'range']: - key_name = index.get(t + '_key_name') + for t in ["hash", "range"]: + key_name = index.get(t + "_key_name") if not key_name: continue - key_type = index.get(t + '_key_type') or DYNAMO_TYPE_DEFAULT + key_type = index.get(t + "_key_type") or DYNAMO_TYPE_DEFAULT _type = _long_type_to_short(key_type) if key_name in attributes: if _type != attributes[key_name]: - module.fail_json(msg='Conflicting attribute type', - type_1=_type, type_2=attributes[key_name], - key_name=key_name) + module.fail_json( + msg="Conflicting attribute type", type_1=_type, type_2=attributes[key_name], key_name=key_name + ) else: attributes[key_name] = _type @@ -547,9 +547,7 @@ def _generate_attributes(): # Use ansible_dict_to_boto3_tag_list to generate the list of dicts # format we need attrs = ansible_dict_to_boto3_tag_list( - attributes, - tag_name_key_name='AttributeName', - tag_value_key_name='AttributeType' + attributes, tag_name_key_name="AttributeName", tag_value_key_name="AttributeType" ) return list(attrs) @@ -558,8 +556,8 @@ def _generate_throughput(params=None): if not params: params = module.params - read_capacity = params.get('read_capacity') or 1 - write_capacity = params.get('write_capacity') or 1 + read_capacity = params.get("read_capacity") or 1 + write_capacity = params.get("write_capacity") or 1 throughput = dict( ReadCapacityUnits=read_capacity, WriteCapacityUnits=write_capacity, @@ -573,56 +571,54 @@ def _generate_schema(params=None): params = module.params schema = list() - hash_key_name = params.get('hash_key_name') - range_key_name = params.get('range_key_name') + hash_key_name = params.get("hash_key_name") + range_key_name = params.get("range_key_name") if hash_key_name: - entry = _schema_dict(hash_key_name, 'HASH') + entry = _schema_dict(hash_key_name, "HASH") schema.append(entry) if range_key_name: - entry = _schema_dict(range_key_name, 'RANGE') + entry = _schema_dict(range_key_name, "RANGE") schema.append(entry) return schema def _primary_index_changes(current_table): - primary_index = _decode_primary_index(current_table) - hash_key_name = primary_index.get('hash_key_name') - _hash_key_name = module.params.get('hash_key_name') - hash_key_type = primary_index.get('hash_key_type') - _hash_key_type = 
module.params.get('hash_key_type') - range_key_name = primary_index.get('range_key_name') - _range_key_name = module.params.get('range_key_name') - range_key_type = primary_index.get('range_key_type') - _range_key_type = module.params.get('range_key_type') + hash_key_name = primary_index.get("hash_key_name") + _hash_key_name = module.params.get("hash_key_name") + hash_key_type = primary_index.get("hash_key_type") + _hash_key_type = module.params.get("hash_key_type") + range_key_name = primary_index.get("range_key_name") + _range_key_name = module.params.get("range_key_name") + range_key_type = primary_index.get("range_key_type") + _range_key_type = module.params.get("range_key_type") changed = list() if _hash_key_name and (_hash_key_name != hash_key_name): - changed.append('hash_key_name') + changed.append("hash_key_name") if _hash_key_type and (_hash_key_type != hash_key_type): - changed.append('hash_key_type') + changed.append("hash_key_type") if _range_key_name and (_range_key_name != range_key_name): - changed.append('range_key_name') + changed.append("range_key_name") if _range_key_type and (_range_key_type != range_key_type): - changed.append('range_key_type') + changed.append("range_key_type") return changed def _throughput_changes(current_table, params=None): - if not params: params = module.params - throughput = current_table.get('provisioned_throughput', {}) - read_capacity = throughput.get('read_capacity_units', None) - _read_capacity = params.get('read_capacity') or read_capacity - write_capacity = throughput.get('write_capacity_units', None) - _write_capacity = params.get('write_capacity') or write_capacity + throughput = current_table.get("provisioned_throughput", {}) + read_capacity = throughput.get("read_capacity_units", None) + _read_capacity = params.get("read_capacity") or read_capacity + write_capacity = throughput.get("write_capacity_units", None) + _write_capacity = params.get("write_capacity") or write_capacity if (read_capacity != _read_capacity) or (write_capacity != _write_capacity): return dict( @@ -642,14 +638,14 @@ def _generate_global_indexes(billing_mode): if billing_mode == "PAY_PER_REQUEST": include_throughput = False - for index in module.params.get('indexes'): - if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']: + for index in module.params.get("indexes"): + if index.get("type") not in ["global_all", "global_include", "global_keys_only"]: continue - name = index.get('name') + name = index.get("name") if name in index_exists: - module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name)) + module.fail_json(msg=f"Duplicate key {name} in list of global indexes") # Convert the type name to upper case and remove the global_ - index['type'] = index['type'].upper()[7:] + index["type"] = index["type"].upper()[7:] index = _generate_index(index, include_throughput) index_exists[name] = True indexes.append(index) @@ -661,14 +657,13 @@ def _generate_local_indexes(): index_exists = dict() indexes = list() - for index in module.params.get('indexes'): - index = dict() - if index.get('type') not in ['all', 'include', 'keys_only']: + for index in module.params.get("indexes"): + if index.get("type") not in ["all", "include", "keys_only"]: continue - name = index.get('name') + name = index.get("name") if name in index_exists: - module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name)) - index['type'] = index['type'].upper() + module.fail_json(msg=f"Duplicate key {name} in list of local indexes") + 
index["type"] = index["type"].upper() index = _generate_index(index, False) index_exists[name] = True indexes.append(index) @@ -678,32 +673,32 @@ def _generate_local_indexes(): def _generate_global_index_map(current_table): global_index_map = dict() - existing_indexes = current_table['_global_index_map'] - for index in module.params.get('indexes'): - if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']: + existing_indexes = current_table["_global_index_map"] + for index in module.params.get("indexes"): + if index.get("type") not in ["global_all", "global_include", "global_keys_only"]: continue - name = index.get('name') + name = index.get("name") if name in global_index_map: - module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name)) + module.fail_json(msg=f"Duplicate key {name} in list of global indexes") idx = _merge_index_params(index, existing_indexes.get(name, {})) # Convert the type name to upper case and remove the global_ - idx['type'] = idx['type'].upper()[7:] + idx["type"] = idx["type"].upper()[7:] global_index_map[name] = idx return global_index_map def _generate_local_index_map(current_table): local_index_map = dict() - existing_indexes = current_table['_local_index_map'] - for index in module.params.get('indexes'): - if index.get('type') not in ['all', 'include', 'keys_only']: + existing_indexes = current_table["_local_index_map"] + for index in module.params.get("indexes"): + if index.get("type") not in ["all", "include", "keys_only"]: continue - name = index.get('name') + name = index.get("name") if name in local_index_map: - module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name)) + module.fail_json(msg=f"Duplicate key {name} in list of local indexes") idx = _merge_index_params(index, existing_indexes.get(name, {})) # Convert the type name to upper case - idx['type'] = idx['type'].upper() + idx["type"] = idx["type"].upper() local_index_map[name] = idx return local_index_map @@ -711,27 +706,28 @@ def _generate_local_index_map(current_table): def _generate_index(index, include_throughput=True): key_schema = _generate_schema(index) throughput = _generate_throughput(index) - non_key_attributes = index['includes'] or [] + non_key_attributes = index["includes"] or [] projection = dict( - ProjectionType=index['type'], + ProjectionType=index["type"], ) - if index['type'] != 'ALL': + if index["type"] != "ALL": if non_key_attributes: - projection['NonKeyAttributes'] = non_key_attributes + projection["NonKeyAttributes"] = non_key_attributes else: if non_key_attributes: module.fail_json( - "DynamoDB does not support specifying non-key-attributes ('includes') for " - "indexes of type 'all'. Index name: {0}".format(index['name'])) + "DynamoDB does not support specifying non-key-attributes ('includes') for indexes of type 'all'. 
Index"
+                f" name: {index['name']}"
+            )
 
     idx = dict(
-        IndexName=index['name'],
+        IndexName=index["name"],
         KeySchema=key_schema,
         Projection=projection,
     )
 
     if include_throughput:
-        idx['ProvisionedThroughput'] = throughput
+        idx["ProvisionedThroughput"] = throughput
 
     return idx
 
@@ -742,15 +738,15 @@ def _attribute_changes(current_table):
 
 
 def _global_index_changes(current_table):
-    current_global_index_map = current_table['_global_index_map']
+    current_global_index_map = current_table["_global_index_map"]
     global_index_map = _generate_global_index_map(current_table)
 
-    current_billing_mode = current_table.get('billing_mode')
+    current_billing_mode = current_table.get("billing_mode")
 
-    if module.params.get('billing_mode') is None:
+    if module.params.get("billing_mode") is None:
         billing_mode = current_billing_mode
     else:
-        billing_mode = module.params.get('billing_mode')
+        billing_mode = module.params.get("billing_mode")
 
     include_throughput = True
 
@@ -761,7 +757,6 @@ def _global_index_changes(current_table):
 
     # TODO (future) it would be nice to add support for deleting an index
     for name in global_index_map:
-
         idx = dict(_generate_index(global_index_map[name], include_throughput=include_throughput))
         if name not in current_global_index_map:
             index_changes.append(dict(Create=idx))
@@ -798,37 +793,37 @@ def _update_table(current_table):
     # Get throughput / billing_mode changes
     throughput_changes = _throughput_changes(current_table)
     if throughput_changes:
-        changes['ProvisionedThroughput'] = throughput_changes
+        changes["ProvisionedThroughput"] = throughput_changes
 
-    current_billing_mode = current_table.get('billing_mode')
-    new_billing_mode = module.params.get('billing_mode')
+    current_billing_mode = current_table.get("billing_mode")
+    new_billing_mode = module.params.get("billing_mode")
 
     if new_billing_mode is None:
         new_billing_mode = current_billing_mode
 
     if current_billing_mode != new_billing_mode:
-        changes['BillingMode'] = new_billing_mode
+        changes["BillingMode"] = new_billing_mode
 
     # Update table_class, keeping the existing value if none is defined
-    if module.params.get('table_class'):
-        if module.params.get('table_class') != current_table.get('table_class'):
-            changes['TableClass'] = module.params.get('table_class')
+    if module.params.get("table_class"):
+        if module.params.get("table_class") != current_table.get("table_class"):
+            changes["TableClass"] = module.params.get("table_class")
 
     global_index_changes = _global_index_changes(current_table)
     if global_index_changes:
-        changes['GlobalSecondaryIndexUpdates'] = global_index_changes
+        changes["GlobalSecondaryIndexUpdates"] = global_index_changes
         # Only one index can be changed at a time unless the billing mode is changing; pass the first
         # change during the main update and deal with the others on a slow retry to wait for
         # completion
 
         if current_billing_mode == new_billing_mode:
             if len(global_index_changes) > 1:
-                changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]]
+                changes["GlobalSecondaryIndexUpdates"] = [global_index_changes[0]]
                 additional_global_index_changes = global_index_changes[1:]
 
     local_index_changes = _local_index_changes(current_table)
     if local_index_changes:
-        changes['LocalSecondaryIndexUpdates'] = local_index_changes
+        changes["LocalSecondaryIndexUpdates"] = local_index_changes
 
     if not changes:
         return False
 
@@ -837,38 +832,39 @@ def _update_table(current_table):
         return True
 
     if global_index_changes or local_index_changes:
-        changes['AttributeDefinitions'] = _generate_attributes()
+        changes["AttributeDefinitions"] = _generate_attributes()
 
     try:
-        
client.update_table( - aws_retry=True, - TableName=module.params.get('name'), - **changes - ) + client.update_table(aws_retry=True, TableName=module.params.get("name"), **changes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update table") if additional_global_index_changes: for index in additional_global_index_changes: + wait_indexes() try: - _update_table_with_long_retry(GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes['AttributeDefinitions']) + _update_table_with_long_retry( + GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes["AttributeDefinitions"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update table", changes=changes, - additional_global_index_changes=additional_global_index_changes) - - if module.params.get('wait'): - wait_exists() + module.fail_json_aws( + e, + msg="Failed to update table", + changes=changes, + additional_global_index_changes=additional_global_index_changes, + ) return True def _update_tags(current_table): - _tags = module.params.get('tags') + _tags = module.params.get("tags") if _tags is None: return False - tags_to_add, tags_to_remove = compare_aws_tags(current_table['tags'], module.params.get('tags'), - purge_tags=module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags( + current_table["tags"], module.params.get("tags"), purge_tags=module.params.get("purge_tags") + ) # If neither need updating we can return already if not (tags_to_add or tags_to_remove): @@ -881,7 +877,7 @@ def _update_tags(current_table): try: client.tag_resource( aws_retry=True, - ResourceArn=current_table['arn'], + ResourceArn=current_table["arn"], Tags=ansible_dict_to_boto3_tag_list(tags_to_add), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -890,7 +886,7 @@ def _update_tags(current_table): try: client.untag_resource( aws_retry=True, - ResourceArn=current_table['arn'], + ResourceArn=current_table["arn"], TagKeys=tags_to_remove, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -902,28 +898,31 @@ def _update_tags(current_table): def update_table(current_table): primary_index_changes = _primary_index_changes(current_table) if primary_index_changes: - module.fail_json("DynamoDB does not support updating the Primary keys on a table. Changed paramters are: {0}".format(primary_index_changes)) + module.fail_json( + f"DynamoDB does not support updating the Primary keys on a table. 
Changed parameters are: {primary_index_changes}"
+        )
 
     changed = False
     changed |= _update_table(current_table)
     changed |= _update_tags(current_table)
 
-    if module.params.get('wait'):
+    if module.params.get("wait"):
         wait_exists()
+        wait_indexes()
 
     return changed
 
 
 def create_table():
-    table_name = module.params.get('name')
-    table_class = module.params.get('table_class')
-    hash_key_name = module.params.get('hash_key_name')
-    billing_mode = module.params.get('billing_mode')
+    table_name = module.params.get("name")
+    table_class = module.params.get("table_class")
+    hash_key_name = module.params.get("hash_key_name")
+    billing_mode = module.params.get("billing_mode")
 
     if billing_mode is None:
         billing_mode = "PROVISIONED"
 
-    tags = ansible_dict_to_boto3_tag_list(module.params.get('tags') or {})
+    tags = ansible_dict_to_boto3_tag_list(module.params.get("tags") or {})
 
     if not hash_key_name:
         module.fail_json('"hash_key_name" must be provided when creating a new table.')
@@ -951,21 +950,22 @@ def create_table():
     )
 
     if table_class:
-        params['TableClass'] = table_class
+        params["TableClass"] = table_class
     if billing_mode == "PROVISIONED":
-        params['ProvisionedThroughput'] = throughput
+        params["ProvisionedThroughput"] = throughput
     if local_indexes:
-        params['LocalSecondaryIndexes'] = local_indexes
+        params["LocalSecondaryIndexes"] = local_indexes
     if global_indexes:
-        params['GlobalSecondaryIndexes'] = global_indexes
+        params["GlobalSecondaryIndexes"] = global_indexes
 
     try:
         client.create_table(aws_retry=True, **params)
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Failed to create table')
+        module.fail_json_aws(e, msg="Failed to create table")
 
-    if module.params.get('wait'):
+    if module.params.get("wait"):
         wait_exists()
+        wait_indexes()
 
     return True
 
@@ -977,30 +977,34 @@ def delete_table(current_table):
     if module.check_mode:
         return True
 
-    table_name = module.params.get('name')
+    table_name = module.params.get("name")
 
     # If an index is mid-update then we have to wait for the update to complete
     # before deletion will succeed
     long_retry = AWSRetry.jittered_backoff(
-        retries=45, delay=5, max_delay=30,
-        catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException'],
+        retries=45,
+        delay=5,
+        max_delay=30,
+        catch_extra_error_codes=["LimitExceededException", "ResourceInUseException"],
     )
 
     try:
         long_retry(client.delete_table)(TableName=table_name)
-    except is_boto3_error_code('ResourceNotFoundException'):
+    except is_boto3_error_code("ResourceNotFoundException"):
         return False
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg='Failed to delete table')
+    except (
+        botocore.exceptions.ClientError,
+        botocore.exceptions.BotoCoreError,
+    ) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Failed to delete table")
 
-    if module.params.get('wait'):
+    if module.params.get("wait"):
         wait_not_exists()
 
     return True
 
 
 def main():
-
     global module
     global client
 
@@ -1008,36 +1012,36 @@ def main():
     # different parameters, use a separate namespace for names,
     # and local indexes can't be updated.
     index_options = dict(
-        name=dict(type='str', required=True),
+        name=dict(type="str", required=True),
         # It would be nice to make this optional, but because Local and Global
         # indexes are mixed in here we need this to be able to tell to which
         # group of indexes the index belongs.
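# For example (illustrative values), an indexes entry of
#   dict(name="owner-idx", type="global_include", hash_key_name="owner", includes=["status"])
# is routed to the GlobalSecondaryIndexes handling, while type="include" would make it a
# local secondary index with otherwise identical options.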
- type=dict(type='str', required=True, choices=INDEX_TYPE_OPTIONS), - hash_key_name=dict(type='str', required=False), - hash_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES), - range_key_name=dict(type='str', required=False), - range_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES), - includes=dict(type='list', required=False, elements='str'), - read_capacity=dict(type='int', required=False), - write_capacity=dict(type='int', required=False), + type=dict(type="str", required=True, choices=INDEX_TYPE_OPTIONS), + hash_key_name=dict(type="str", required=False), + hash_key_type=dict(type="str", required=False, choices=KEY_TYPE_CHOICES), + range_key_name=dict(type="str", required=False), + range_key_type=dict(type="str", required=False, choices=KEY_TYPE_CHOICES), + includes=dict(type="list", required=False, elements="str"), + read_capacity=dict(type="int", required=False), + write_capacity=dict(type="int", required=False), ) argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - name=dict(required=True, type='str'), - hash_key_name=dict(type='str'), - hash_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), - range_key_name=dict(type='str'), - range_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), - billing_mode=dict(type='str', choices=['PROVISIONED', 'PAY_PER_REQUEST']), - read_capacity=dict(type='int'), - write_capacity=dict(type='int'), - indexes=dict(default=[], type='list', elements='dict', options=index_options), - table_class=dict(type='str', choices=['STANDARD', 'STANDARD_INFREQUENT_ACCESS']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=300, type='int', aliases=['wait_for_active_timeout']), + state=dict(default="present", choices=["present", "absent"]), + name=dict(required=True, type="str"), + hash_key_name=dict(type="str"), + hash_key_type=dict(type="str", choices=KEY_TYPE_CHOICES), + range_key_name=dict(type="str"), + range_key_type=dict(type="str", choices=KEY_TYPE_CHOICES), + billing_mode=dict(type="str", choices=["PROVISIONED", "PAY_PER_REQUEST"]), + read_capacity=dict(type="int"), + write_capacity=dict(type="int"), + indexes=dict(default=[], type="list", elements="dict", options=index_options), + table_class=dict(type="str", choices=["STANDARD", "STANDARD_INFREQUENT_ACCESS"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=900, type="int", aliases=["wait_for_active_timeout"]), ) module = AnsibleAWSModule( @@ -1047,41 +1051,38 @@ def main(): ) retry_decorator = AWSRetry.jittered_backoff( - catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'], + catch_extra_error_codes=["LimitExceededException", "ResourceInUseException", "ResourceNotFoundException"], ) - client = module.client('dynamodb', retry_decorator=retry_decorator) - - if module.params.get('table_class'): - module.require_botocore_at_least('1.23.18', reason='to set table_class') + client = module.client("dynamodb", retry_decorator=retry_decorator) current_table = get_dynamodb_table() changed = False table = None results = dict() - state = module.params.get('state') - if state == 'present': + state = module.params.get("state") + if state == "present": if current_table: changed |= update_table(current_table) else: changed |= create_table() table = 
get_dynamodb_table() - elif state == 'absent': + elif state == "absent": changed |= delete_table(current_table) compat_results = compatability_results(table) if compat_results: results.update(compat_results) - results['changed'] = changed + results["changed"] = changed if table: # These are used to pass computed data about, not needed for users - table.pop('_global_index_map', None) - table.pop('_local_index_map', None) - results['table'] = table + table.pop("_global_index_map", None) + table.pop("_local_index_map", None) + results["table"] = table module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py b/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py index 9cbbb3e5e..eca236cf4 100644 --- a/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py +++ b/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: dynamodb_ttl version_added: 1.0.0 @@ -32,14 +30,15 @@ options: required: true type: str -author: Ted Timmons (@tedder) +author: +- Ted Timmons (@tedder) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: enable TTL on my cowfacts table community.aws.dynamodb_ttl: state: enable @@ -51,9 +50,9 @@ EXAMPLES = ''' state: disable table_name: cowfacts attribute_name: cow_deleted_date -''' +""" -RETURN = ''' +RETURN = r""" current_status: description: current or new TTL specification. type: dict @@ -61,59 +60,59 @@ current_status: sample: - { "AttributeName": "deploy_timestamp", "TimeToLiveStatus": "ENABLED" } - { "AttributeName": "deploy_timestamp", "Enabled": true } -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_current_ttl_state(c, table_name): - '''Fetch the state dict for a table.''' + """Fetch the state dict for a table.""" current_state = c.describe_time_to_live(TableName=table_name) - return current_state.get('TimeToLiveDescription') + return current_state.get("TimeToLiveDescription") def does_state_need_changing(attribute_name, desired_state, current_spec): - '''Run checks to see if the table needs to be modified. Basically a dirty check.''' + """Run checks to see if the table needs to be modified. Basically a dirty check.""" if not current_spec: # we don't have an entry (or a table?) 
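# Worked example (illustrative): desired_state="enable" against a current spec of
# {"TimeToLiveStatus": "DISABLED", "AttributeName": "ttl"} returns True (the status must change),
# while {"TimeToLiveStatus": "ENABLED", "AttributeName": "ttl"} with a matching attribute_name
# falls through every check below and returns False (nothing to do).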
return True - if desired_state.lower() == 'enable' and current_spec.get('TimeToLiveStatus') not in ['ENABLING', 'ENABLED']: + if desired_state.lower() == "enable" and current_spec.get("TimeToLiveStatus") not in ["ENABLING", "ENABLED"]: return True - if desired_state.lower() == 'disable' and current_spec.get('TimeToLiveStatus') not in ['DISABLING', 'DISABLED']: + if desired_state.lower() == "disable" and current_spec.get("TimeToLiveStatus") not in ["DISABLING", "DISABLED"]: return True - if attribute_name != current_spec.get('AttributeName'): + if attribute_name != current_spec.get("AttributeName"): return True return False def set_ttl_state(c, table_name, state, attribute_name): - '''Set our specification. Returns the update_time_to_live specification dict, - which is different than the describe_* call.''' + """Set our specification. Returns the update_time_to_live specification dict, + which is different than the describe_* call.""" is_enabled = False - if state.lower() == 'enable': + if state.lower() == "enable": is_enabled = True ret = c.update_time_to_live( TableName=table_name, TimeToLiveSpecification={ - 'Enabled': is_enabled, - 'AttributeName': attribute_name - } + "Enabled": is_enabled, + "AttributeName": attribute_name, + }, ) - return ret.get('TimeToLiveSpecification') + return ret.get("TimeToLiveSpecification") def main(): argument_spec = dict( - state=dict(choices=['enable', 'disable']), + state=dict(choices=["enable", "disable"]), table_name=dict(required=True), attribute_name=dict(required=True), ) @@ -122,26 +121,28 @@ def main(): ) try: - dbclient = module.client('dynamodb') + dbclient = module.client("dynamodb") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - result = {'changed': False} - state = module.params['state'] + result = {"changed": False} + state = module.params["state"] # wrap all our calls to catch the standard exceptions. We don't pass `module` in to the # methods so it's easier to do here. 
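# For reference, the underlying boto3 calls wrapped by the helpers above have this shape
# (an illustrative sketch; the table and attribute names are assumed values):
#
#     client = boto3.client("dynamodb")
#     client.describe_time_to_live(TableName="cowfacts")
#     # -> {"TimeToLiveDescription": {"TimeToLiveStatus": "ENABLED", "AttributeName": "cow_deleted_date"}}
#     client.update_time_to_live(
#         TableName="cowfacts",
#         TimeToLiveSpecification={"Enabled": True, "AttributeName": "cow_deleted_date"},
#     )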
try: - current_state = get_current_ttl_state(dbclient, module.params['table_name']) + current_state = get_current_ttl_state(dbclient, module.params["table_name"]) - if does_state_need_changing(module.params['attribute_name'], module.params['state'], current_state): + if does_state_need_changing(module.params["attribute_name"], module.params["state"], current_state): # changes needed - new_state = set_ttl_state(dbclient, module.params['table_name'], module.params['state'], module.params['attribute_name']) - result['current_status'] = new_state - result['changed'] = True + new_state = set_ttl_state( + dbclient, module.params["table_name"], module.params["state"], module.params["attribute_name"] + ) + result["current_status"] = new_state + result["changed"] = True else: # no changes needed - result['current_status'] = current_state + result["current_status"] = current_state except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Failed to get or update ttl state") @@ -153,5 +154,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py b/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py index 15a69163d..bb5a30ea1 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_ami_copy version_added: 1.0.0 @@ -72,12 +69,12 @@ author: - Amir Moulavi (@amir343) <amir.moulavi@gmail.com> - Tim C (@defunctio) <defunct@defunct.io> extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Basic AMI Copy community.aws.ec2_ami_copy: source_region: us-east-1 @@ -107,8 +104,8 @@ EXAMPLES = ''' region: eu-west-1 source_image_id: ami-xxxxxxx tags: - Name: My-Super-AMI - Patch: 1.2.3 + Name: My-Super-AMI + Patch: 1.2.3 tag_equality: true - name: Encrypted AMI copy @@ -125,26 +122,29 @@ EXAMPLES = ''' source_image_id: ami-xxxxxxx encrypted: true kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b -''' +""" -RETURN = ''' +RETURN = r""" image_id: description: AMI ID of the copied AMI returned: always type: str sample: ami-e689729e -''' +""" try: - from botocore.exceptions import ClientError, WaiterError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import WaiterError except ImportError: pass # caught by AnsibleAWSModule from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def 
copy_image(module, ec2): @@ -157,67 +157,67 @@ def copy_image(module, ec2): image = None changed = False - tags = module.params.get('tags') - - params = {'SourceRegion': module.params.get('source_region'), - 'SourceImageId': module.params.get('source_image_id'), - 'Name': module.params.get('name'), - 'Description': module.params.get('description'), - 'Encrypted': module.params.get('encrypted'), - } - if module.params.get('kms_key_id'): - params['KmsKeyId'] = module.params.get('kms_key_id') + tags = module.params.get("tags") + + params = { + "SourceRegion": module.params.get("source_region"), + "SourceImageId": module.params.get("source_image_id"), + "Name": module.params.get("name"), + "Description": module.params.get("description"), + "Encrypted": module.params.get("encrypted"), + } + if module.params.get("kms_key_id"): + params["KmsKeyId"] = module.params.get("kms_key_id") try: - if module.params.get('tag_equality'): - filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in module.params.get('tags').items()] - filters.append(dict(Name='state', Values=['available', 'pending'])) + if module.params.get("tag_equality"): + filters = [{"Name": f"tag:{k}", "Values": [v]} for (k, v) in module.params.get("tags").items()] + filters.append(dict(Name="state", Values=["available", "pending"])) images = ec2.describe_images(Filters=filters) - if len(images['Images']) > 0: - image = images['Images'][0] + if len(images["Images"]) > 0: + image = images["Images"][0] if not image: image = ec2.copy_image(**params) - image_id = image['ImageId'] + image_id = image["ImageId"] if tags: - ec2.create_tags(Resources=[image_id], - Tags=ansible_dict_to_boto3_tag_list(tags)) + ec2.create_tags(Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags)) changed = True - if module.params.get('wait'): + if module.params.get("wait"): delay = 15 - max_attempts = module.params.get('wait_timeout') // delay - image_id = image.get('ImageId') - ec2.get_waiter('image_available').wait( - ImageIds=[image_id], - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts} + max_attempts = module.params.get("wait_timeout") // delay + image_id = image.get("ImageId") + ec2.get_waiter("image_available").wait( + ImageIds=[image_id], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts} ) module.exit_json(changed=changed, **camel_dict_to_snake_dict(image)) except WaiterError as e: - module.fail_json_aws(e, msg='An error occurred waiting for the image to become available') + module.fail_json_aws(e, msg="An error occurred waiting for the image to become available") except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not copy AMI") except Exception as e: - module.fail_json(msg='Unhandled exception. (%s)' % to_native(e)) + module.fail_json(msg=f"Unhandled exception. 
({to_native(e)})") def main(): argument_spec = dict( source_region=dict(required=True), source_image_id=dict(required=True), - name=dict(default='default'), - description=dict(default=''), - encrypted=dict(type='bool', default=False, required=False), - kms_key_id=dict(type='str', required=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=600), - tags=dict(type='dict', aliases=['resource_tags']), - tag_equality=dict(type='bool', default=False)) + name=dict(default="default"), + description=dict(default=""), + encrypted=dict(type="bool", default=False, required=False), + kms_key_id=dict(type="str", required=False), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=600), + tags=dict(type="dict", aliases=["resource_tags"]), + tag_equality=dict(type="bool", default=False), + ) module = AnsibleAWSModule(argument_spec=argument_spec) - ec2 = module.client('ec2') + ec2 = module.client("ec2") copy_image(module, ec2) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway.py b/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway.py new file mode 100644 index 000000000..97d62b5fc --- /dev/null +++ b/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway.py @@ -0,0 +1,259 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: ec2_carrier_gateway +version_added: 6.0.0 +short_description: Manage an AWS VPC Carrier gateway +description: + - Manage an AWS VPC Carrier gateway. +author: + - "Marco Braga (@mtulio)" +options: + vpc_id: + description: + - The VPC ID for the VPC in which to manage the Carrier Gateway. + required: true + type: str + carrier_gateway_id: + description: + - The Carrier Gateway ID to manage the Carrier Gateway. + required: false + type: str + state: + description: + - Create or terminate the Carrier Gateway. + default: present + choices: [ 'present', 'absent' ] + type: str +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Ensure that the VPC has an Carrier Gateway. +# The Carrier Gateway ID can be accessed via {{cagw.carrier_gateway_id}} for use in setting up Route tables etc. +- name: Create Carrier gateway + community.aws.ec2_carrier_gateway: + vpc_id: vpc-abcdefgh + state: present + register: cagw + +- name: Create Carrier gateway with tags + community.aws.ec2_carrier_gateway: + vpc_id: vpc-abcdefgh + state: present + tags: + Tag1: tag1 + Tag2: tag2 + register: cagw + +- name: Delete Carrier gateway + community.aws.ec2_carrier_gateway: + vpc_id: vpc-abcdefgh + carrier_gateway_id: "cagw-123" + state: absent + register: vpc_cagw_delete +""" + +RETURN = r""" +changed: + description: If any changes have been made to the Carrier Gateway. + type: bool + returned: always + sample: + changed: false +carrier_gateway_id: + description: The unique identifier for the Carrier Gateway. + type: str + returned: I(state=present) + sample: + carrier_gateway_id: "cagw-XXXXXXXX" +tags: + description: The tags associated the Carrier Gateway. 
+ type: dict + returned: I(state=present) + sample: + tags: + "Ansible": "Test" +vpc_id: + description: The VPC ID associated with the Carrier Gateway. + type: str + returned: I(state=present) + sample: + vpc_id: "vpc-XXXXXXXX" +""" + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + +@AWSRetry.jittered_backoff(retries=10, delay=10) +def describe_cagws_with_backoff(connection, **params): + paginator = connection.get_paginator("describe_carrier_gateways") + return paginator.paginate(**params).build_full_result()["CarrierGateways"] + + +class AnsibleEc2Cagw: + def __init__(self, module, results): + self._module = module + self._results = results + self._connection = self._module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + self._check_mode = self._module.check_mode + + def process(self): + vpc_id = self._module.params.get("vpc_id") + state = self._module.params.get("state", "present") + tags = self._module.params.get("tags") + purge_tags = self._module.params.get("purge_tags") + + if state == "present": + self.ensure_cagw_present(vpc_id, tags, purge_tags) + elif state == "absent": + self.ensure_cagw_absent(vpc_id) + + def get_matching_cagw(self, vpc_id, carrier_gateway_id=None): + """ + Returns the carrier gateway found. 
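        Example (illustrative IDs; keys follow camel_dict_to_snake_dict of the API response):
            get_matching_cagw(vpc_id="vpc-0abc12")
            -> {"carrier_gateway_id": "cagw-0abc12", "vpc_id": "vpc-0abc12",
                "state": "available", "tags": [...], ...}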
+ Parameters: + vpc_id (str): VPC ID + carrier_gateway_id (str): Carrier Gateway ID, if specified + Returns: + cagw (dict): dict of cagw found, None if none found + """ + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id}) + try: + if not carrier_gateway_id: + cagws = describe_cagws_with_backoff( + self._connection, + Filters=filters, + ) + else: + cagws = describe_cagws_with_backoff( + self._connection, + CarrierGatewayIds=[carrier_gateway_id], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e) + + cagw = None + if len(cagws) > 1: + self._module.fail_json(msg=f"EC2 returned more than one Carrier Gateway for VPC {vpc_id}, aborting") + elif cagws: + cagw = camel_dict_to_snake_dict(cagws[0]) + + return cagw + + @staticmethod + def get_cagw_info(cagw, vpc_id): + return { + "carrier_gateway_id": cagw["carrier_gateway_id"], + "tags": boto3_tag_list_to_ansible_dict(cagw["tags"]), + "vpc_id": vpc_id, + } + + def ensure_cagw_absent(self, vpc_id): + cagw = self.get_matching_cagw(vpc_id) + if cagw is None: + return self._results + + if self._check_mode: + self._results["changed"] = True + return self._results + + try: + self._results["changed"] = True + self._connection.delete_carrier_gateway( + aws_retry=True, + CarrierGatewayId=cagw["carrier_gateway_id"], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="Unable to delete Carrier Gateway") + + return self._results + + def ensure_cagw_present(self, vpc_id, tags, purge_tags): + cagw = self.get_matching_cagw(vpc_id) + + if cagw is None: + if self._check_mode: + self._results["changed"] = True + self._results["carrier_gateway_id"] = None + return self._results + + try: + response = self._connection.create_carrier_gateway(VpcId=vpc_id, aws_retry=True) + cagw = camel_dict_to_snake_dict(response["CarrierGateway"]) + self._results["changed"] = True + except is_boto3_error_message("You must be opted into a wavelength zone to create a carrier gateway.") as e: + self._module.fail_json(msg="You must be opted into a wavelength zone to create a carrier gateway") + except botocore.exceptions.WaiterError as e: + self._module.fail_json_aws(e, msg="No Carrier Gateway exists.") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="Unable to create Carrier Gateway") + + # Modify tags + self._results["changed"] |= ensure_ec2_tags( + self._connection, + self._module, + cagw["carrier_gateway_id"], + resource_type="carrier-gateway", + tags=tags, + purge_tags=purge_tags, + retry_codes="InvalidCarrierGatewayID.NotFound", + ) + + # Update cagw + cagw = self.get_matching_cagw(vpc_id, carrier_gateway_id=cagw["carrier_gateway_id"]) + cagw_info = self.get_cagw_info(cagw, vpc_id) + self._results.update(cagw_info) + + return self._results + + +def main(): + argument_spec = dict( + carrier_gateway_id=dict(required=False), + vpc_id=dict(required=True), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_one_of=[["vpc_id", "carrier_gateway_id"]], + supports_check_mode=True, + ) + results = dict( + changed=False, + ) + cagw_manager = AnsibleEc2Cagw(module=module, results=results) + cagw_manager.process() + + module.exit_json(**results) + + +if __name__ == 
"__main__": + main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway_info.py b/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway_info.py new file mode 100644 index 000000000..67ee30e55 --- /dev/null +++ b/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway_info.py @@ -0,0 +1,159 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: ec2_carrier_gateway_info +version_added: 6.0.0 +short_description: Gather information about carrier gateways in AWS +description: + - Gather information about carrier gateways in AWS. +author: + - "Marco Braga (@mtulio)" +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCarrierGateways.html) for possible filters. + required: false + default: {} + type: dict + carrier_gateway_ids: + description: + - Get details of specific Carrier Gateway ID. + required: false + type: list + elements: str +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# # Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Gather information about all Carrier Gateways for an account or profile + community.aws.ec2_carrier_gateway_info: + region: ap-southeast-2 + register: cagw_info + +- name: Gather information about a filtered list of Carrier Gateways + community.aws.ec2_carrier_gateway_info: + region: ap-southeast-2 + filters: + "tag:Name": "cagw-123" + register: cagw_info + +- name: Gather information about a specific carrier gateway by CarrierGatewayId + community.aws.ec2_carrier_gateway_info: + region: ap-southeast-2 + carrier_gateway_ids: cagw-c1231234 + register: cagw_info +""" + +RETURN = r""" +changed: + description: True if listing the carrier gateways succeeds. + type: bool + returned: always + sample: "false" +carrier_gateways: + description: The carrier gateways for the account. + returned: always + type: complex + contains: + vpc_id: + description: The ID of the VPC. + returned: I(state=present) + type: str + sample: vpc-02123b67 + carrier_gateway_id: + description: The ID of the carrier gateway. + returned: I(state=present) + type: str + sample: cagw-2123634d + tags: + description: Any tags assigned to the carrier gateway. 
+ returned: I(state=present) + type: dict + sample: + tags: + "Ansible": "Test" +""" + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + +def get_carrier_gateway_info(carrier_gateway): + tags = boto3_tag_list_to_ansible_dict(carrier_gateway["Tags"]) + ignore_list = [] + carrier_gateway_info = { + "CarrierGatewayId": carrier_gateway["CarrierGatewayId"], + "VpcId": carrier_gateway["VpcId"], + "Tags": tags, + } + + carrier_gateway_info = camel_dict_to_snake_dict(carrier_gateway_info, ignore_list=ignore_list) + return carrier_gateway_info + + +def list_carrier_gateways(connection, module): + params = dict() + + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + if module.params.get("carrier_gateway_ids"): + params["CarrierGatewayIds"] = module.params.get("carrier_gateway_ids") + + try: + all_carrier_gateways = connection.describe_carrier_gateways(aws_retry=True, **params) + except is_boto3_error_code("InvalidCarrierGatewayID.NotFound"): + module.fail_json("CarrierGateway not found") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "Unable to describe carrier gateways") + + return [get_carrier_gateway_info(cagw) for cagw in all_carrier_gateways["CarrierGateways"]] + + +def main(): + argument_spec = dict( + carrier_gateway_ids=dict(default=None, elements="str", type="list"), + filters=dict(default={}, type="dict"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + # Validate Requirements + try: + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + results = list_carrier_gateways(connection, module) + + module.exit_json(carrier_gateways=results) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py index 3b176b5ee..19fc8eab7 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py @@ -1,25 +1,24 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_customer_gateway version_added: 1.0.0 short_description: Manage an AWS customer gateway description: - - Manage an AWS customer 
gateway. -author: Michael Baydoun (@MichaelBaydoun) + - Manage an AWS customer gateway. +author: + - Michael Baydoun (@MichaelBaydoun) notes: - - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the - first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent - requests do not create new customer gateway resources. - - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use - customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details. + - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the + first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent + requests do not create new customer gateway resources. + - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use + customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details. options: bgp_asn: description: @@ -49,13 +48,12 @@ options: choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create Customer Gateway community.aws.ec2_customer_gateway: bgp_asn: 12345 @@ -71,9 +69,9 @@ EXAMPLES = ''' state: absent region: us-east-1 register: cgw -''' +""" -RETURN = ''' +RETURN = r""" gateway.customer_gateways: description: details about the gateway that was created. returned: success @@ -108,7 +106,7 @@ gateway.customer_gateways: returned: when gateway exists and is available. 
sample: ipsec.1 type: str -''' +""" try: import botocore @@ -117,26 +115,23 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -class Ec2CustomerGatewayManager: +class Ec2CustomerGatewayManager: def __init__(self, module): self.module = module try: - self.ec2 = module.client('ec2') + self.ec2 = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState']) + @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=["IncorrectState"]) def ensure_cgw_absent(self, gw_id): - response = self.ec2.delete_customer_gateway( - DryRun=False, - CustomerGatewayId=gw_id - ) + response = self.ec2.delete_customer_gateway(DryRun=False, CustomerGatewayId=gw_id) return response def ensure_cgw_present(self, bgp_asn, ip_address): @@ -144,7 +139,7 @@ class Ec2CustomerGatewayManager: bgp_asn = 65000 response = self.ec2.create_customer_gateway( DryRun=False, - Type='ipsec.1', + Type="ipsec.1", PublicIp=ip_address, BgpAsn=bgp_asn, ) @@ -157,11 +152,8 @@ class Ec2CustomerGatewayManager: gw_id, ], Tags=[ - { - 'Key': 'Name', - 'Value': name - }, - ] + {"Key": "Name", "Value": name}, + ], ) return response @@ -170,86 +162,84 @@ class Ec2CustomerGatewayManager: DryRun=False, Filters=[ { - 'Name': 'state', - 'Values': [ - 'available', - ] + "Name": "state", + "Values": [ + "available", + ], }, { - 'Name': 'ip-address', - 'Values': [ + "Name": "ip-address", + "Values": [ ip_address, - ] - } - ] + ], + }, + ], ) return response def main(): argument_spec = dict( - bgp_asn=dict(required=False, type='int'), + bgp_asn=dict(required=False, type="int"), ip_address=dict(required=True), name=dict(required=True), - routing=dict(default='dynamic', choices=['dynamic', 'static']), - state=dict(default='present', choices=['present', 'absent']), + routing=dict(default="dynamic", choices=["dynamic", "static"]), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ - ('routing', 'dynamic', ['bgp_asn']) - ] + ("routing", "dynamic", ["bgp_asn"]), + ], ) gw_mgr = Ec2CustomerGatewayManager(module) - name = module.params.get('name') + name = module.params.get("name") - existing = gw_mgr.describe_gateways(module.params['ip_address']) + existing = gw_mgr.describe_gateways(module.params["ip_address"]) results = dict(changed=False) - if module.params['state'] == 'present': - if existing['CustomerGateways']: - existing['CustomerGateway'] = existing['CustomerGateways'][0] - results['gateway'] = existing - if existing['CustomerGateway']['Tags']: - tag_array = existing['CustomerGateway']['Tags'] + if module.params["state"] == "present": + if existing["CustomerGateways"]: + existing["CustomerGateway"] = existing["CustomerGateways"][0] + results["gateway"] = existing + if existing["CustomerGateway"]["Tags"]: + tag_array = 
existing["CustomerGateway"]["Tags"] for key, value in enumerate(tag_array): - if value['Key'] == 'Name': - current_name = value['Value'] + if value["Key"] == "Name": + current_name = value["Value"] if current_name != name: - results['name'] = gw_mgr.tag_cgw_name( - results['gateway']['CustomerGateway']['CustomerGatewayId'], - module.params['name'], + results["name"] = gw_mgr.tag_cgw_name( + results["gateway"]["CustomerGateway"]["CustomerGatewayId"], + module.params["name"], ) - results['changed'] = True + results["changed"] = True else: if not module.check_mode: - results['gateway'] = gw_mgr.ensure_cgw_present( - module.params['bgp_asn'], - module.params['ip_address'], + results["gateway"] = gw_mgr.ensure_cgw_present( + module.params["bgp_asn"], + module.params["ip_address"], ) - results['name'] = gw_mgr.tag_cgw_name( - results['gateway']['CustomerGateway']['CustomerGatewayId'], - module.params['name'], + results["name"] = gw_mgr.tag_cgw_name( + results["gateway"]["CustomerGateway"]["CustomerGatewayId"], + module.params["name"], ) - results['changed'] = True + results["changed"] = True - elif module.params['state'] == 'absent': - if existing['CustomerGateways']: - existing['CustomerGateway'] = existing['CustomerGateways'][0] - results['gateway'] = existing + elif module.params["state"] == "absent": + if existing["CustomerGateways"]: + existing["CustomerGateway"] = existing["CustomerGateways"][0] + results["gateway"] = existing if not module.check_mode: - results['gateway'] = gw_mgr.ensure_cgw_absent( - existing['CustomerGateway']['CustomerGatewayId'] - ) - results['changed'] = True + results["gateway"] = gw_mgr.ensure_cgw_absent(existing["CustomerGateway"]["CustomerGatewayId"]) + results["changed"] = True pretty_results = camel_dict_to_snake_dict(results) module.exit_json(**pretty_results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py index 429ba2083..18c1a366a 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_customer_gateway_info version_added: 1.0.0 short_description: Gather information about customer gateways in AWS description: - - Gather information about customer gateways in AWS. -author: Madhura Naniwadekar (@Madhura-CSI) + - Gather information about customer gateways in AWS. +author: + - Madhura Naniwadekar (@Madhura-CSI) options: filters: description: @@ -28,13 +27,12 @@ options: elements: str default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Gather information about all customer gateways @@ -55,9 +53,9 @@ EXAMPLES = r''' - 'cgw-48841a09' - 'cgw-fec021ce' register: cust_gw_info -''' +""" -RETURN = r''' +RETURN = r""" customer_gateways: description: List of one or more customer gateways. returned: always @@ -78,60 +76,65 @@ customer_gateways: "type": "ipsec.1" } ] -''' +""" import json + try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ) +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def date_handler(obj): - return obj.isoformat() if hasattr(obj, 'isoformat') else obj + return obj.isoformat() if hasattr(obj, "isoformat") else obj def list_customer_gateways(connection, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - params['CustomerGatewayIds'] = module.params.get('customer_gateway_ids') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + params["CustomerGatewayIds"] = module.params.get("customer_gateway_ids") try: result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler)) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not describe customer gateways") - snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result['CustomerGateways']] + snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result["CustomerGateways"]] if snaked_customer_gateways: for customer_gateway in snaked_customer_gateways: - customer_gateway['tags'] = boto3_tag_list_to_ansible_dict(customer_gateway.get('tags', [])) - customer_gateway_name = customer_gateway['tags'].get('Name') + customer_gateway["tags"] = boto3_tag_list_to_ansible_dict(customer_gateway.get("tags", [])) + customer_gateway_name = customer_gateway["tags"].get("Name") if customer_gateway_name: - customer_gateway['customer_gateway_name'] = customer_gateway_name + customer_gateway["customer_gateway_name"] = customer_gateway_name module.exit_json(changed=False, customer_gateways=snaked_customer_gateways) def main(): - argument_spec = dict( - customer_gateway_ids=dict(default=[], type='list', elements='str'), - filters=dict(default={}, type='dict') + customer_gateway_ids=dict(default=[], type="list", elements="str"), filters=dict(default={}, type="dict") ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['customer_gateway_ids', 'filters']], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ["customer_gateway_ids", "filters"], + ], + supports_check_mode=True, + ) - connection = module.client('ec2') + connection = module.client("ec2") list_customer_gateways(connection, module) -if __name__ == 
'__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py b/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py index 67fb0f43b..9fd32711f 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_launch_template version_added: 1.0.0 @@ -16,10 +15,6 @@ description: - The M(amazon.aws.ec2_instance) and M(community.aws.autoscaling_group) modules can, instead of specifying all parameters on those tasks, be passed a Launch Template which contains settings like instance size, disk type, subnet, and more. -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 author: - Ryan Scott Brown (@ryansb) options: @@ -373,7 +368,6 @@ options: type: str description: > - Wether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)). - - Requires botocore >= 1.21.29 choices: [enabled, disabled] default: 'disabled' instance_metadata_tags: @@ -381,12 +375,15 @@ options: type: str description: - Wether the instance tags are availble (C(enabled)) via metadata endpoint or not (C(disabled)). - - Requires botocore >= 1.23.30 choices: [enabled, disabled] default: 'disabled' -''' +extends_documentation_fragment: +- amazon.aws.common.modules +- amazon.aws.region.modules +- amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an ec2 launch template community.aws.ec2_launch_template: name: "my_template" @@ -410,9 +407,9 @@ EXAMPLES = ''' state: absent # This module does not yet allow deletion of specific versions of launch templates -''' +""" -RETURN = ''' +RETURN = r""" latest_version: description: Latest available version of the launch template returned: when state=present @@ -421,82 +418,110 @@ default_version: description: The version that will be used if only the template name is specified. Often this is the same as the latest version, but not always. 
returned: when state=present type: int -''' -import re +""" + from uuid import uuid4 +try: + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import WaiterError +except ImportError: + pass # caught by AnsibleAWSModule + from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters -try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError -except ImportError: - pass # caught by AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def determine_iam_role(module, name_or_arn): - if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn): - return {'arn': name_or_arn} - iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + if validate_aws_arn(name_or_arn, service="iam", resource_type="instance-profile"): + return {"arn": name_or_arn} + iam = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) try: role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) - return {'arn': role['InstanceProfile']['Arn']} - except is_boto3_error_code('NoSuchEntity') as e: - module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn)) + return {"arn": role["InstanceProfile"]["Arn"]} + except is_boto3_error_code("NoSuchEntity") as e: + module.fail_json_aws(e, msg=f"Could not find instance_role {name_or_arn}") except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn)) + module.fail_json_aws( + e, + msg=f"An error occurred while searching for instance_role {name_or_arn}. 
Please try supplying the full ARN.", + ) def existing_templates(module): - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) matches = None try: - if module.params.get('template_id'): - matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')], aws_retry=True) - elif module.params.get('template_name'): - matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')], aws_retry=True) - except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e: + if module.params.get("template_id"): + matches = ec2.describe_launch_templates( + LaunchTemplateIds=[module.params.get("template_id")], aws_retry=True + ) + elif module.params.get("template_name"): + matches = ec2.describe_launch_templates( + LaunchTemplateNames=[module.params.get("template_name")], aws_retry=True + ) + except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException") as e: # no named template was found, return nothing/empty versions return None, [] - except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-....`'.format( - module.params.get('launch_template_id'))) - except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InvalidLaunchTemplateId.Malformed") as e: # pylint: disable=duplicate-except module.fail_json_aws( - e, msg='Launch template with ID {0} could not be found, please supply a name ' - 'instead so that a new template can be created'.format(module.params.get('launch_template_id'))) + e, + msg=( + f"Launch template with ID {module.params.get('launch_template_id')} is not a valid ID. It should start" + " with `lt-....`" + ), + ) + except is_boto3_error_code("InvalidLaunchTemplateId.NotFoundException") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg=( + f"Launch template with ID {module.params.get('launch_template_id')} could not be found, please supply a" + " name instead so that a new template can be created" + ), + ) except (ClientError, BotoCoreError, WaiterError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.') + module.fail_json_aws(e, msg="Could not check existing launch templates. 
This may be an IAM permission problem.") else: - template = matches['LaunchTemplates'][0] - template_id, template_version, template_default = template['LaunchTemplateId'], template['LatestVersionNumber'], template['DefaultVersionNumber'] + template = matches["LaunchTemplates"][0] + template_id, template_version, template_default = ( + template["LaunchTemplateId"], + template["LatestVersionNumber"], + template["DefaultVersionNumber"], + ) try: - return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id, aws_retry=True)['LaunchTemplateVersions'] + return ( + template, + ec2.describe_launch_template_versions(LaunchTemplateId=template_id, aws_retry=True)[ + "LaunchTemplateVersions" + ], + ) except (ClientError, BotoCoreError, WaiterError) as e: - module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(template['LaunchTemplateName'], template_id)) + module.fail_json_aws( + e, + msg=f"Could not find launch template versions for {template['LaunchTemplateName']} (ID: {template_id}).", + ) def params_to_launch_data(module, template_params): - if template_params.get('tags'): - tag_list = ansible_dict_to_boto3_tag_list(template_params.get('tags')) - template_params['tag_specifications'] = [ - { - 'resource_type': r_type, - 'tags': tag_list - } - for r_type in ('instance', 'volume') + if template_params.get("tags"): + tag_list = ansible_dict_to_boto3_tag_list(template_params.get("tags")) + template_params["tag_specifications"] = [ + {"resource_type": r_type, "tags": tag_list} for r_type in ("instance", "volume") ] - del template_params['tags'] - if module.params.get('iam_instance_profile'): - template_params['iam_instance_profile'] = determine_iam_role(module, module.params['iam_instance_profile']) + del template_params["tags"] + if module.params.get("iam_instance_profile"): + template_params["iam_instance_profile"] = determine_iam_role(module, module.params["iam_instance_profile"]) params = snake_dict_to_camel_dict( dict((k, v) for k, v in template_params.items() if v is not None), capitalize_first=True, @@ -505,71 +530,61 @@ def params_to_launch_data(module, template_params): def delete_template(module): - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) template, template_versions = existing_templates(module) deleted_versions = [] if template or template_versions: - non_default_versions = [to_text(t['VersionNumber']) for t in template_versions if not t['DefaultVersion']] + non_default_versions = [to_text(t["VersionNumber"]) for t in template_versions if not t["DefaultVersion"]] if non_default_versions: try: v_resp = ec2.delete_launch_template_versions( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], Versions=non_default_versions, aws_retry=True, ) - if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']: - module.warn('Failed to delete template versions {0} on launch template {1}'.format( - v_resp['UnsuccessfullyDeletedLaunchTemplateVersions'], - template['LaunchTemplateId'], - )) - deleted_versions = [camel_dict_to_snake_dict(v) for v in v_resp['SuccessfullyDeletedLaunchTemplateVersions']] + if v_resp["UnsuccessfullyDeletedLaunchTemplateVersions"]: + module.warn( + f"Failed to delete template versions {v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']} on" + f" launch template {template['LaunchTemplateId']}" + ) + deleted_versions = [ + camel_dict_to_snake_dict(v) for v in 
v_resp["SuccessfullyDeletedLaunchTemplateVersions"] + ] except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not delete existing versions of the launch template {0}".format(template['LaunchTemplateId'])) + module.fail_json_aws( + e, + msg=f"Could not delete existing versions of the launch template {template['LaunchTemplateId']}", + ) try: resp = ec2.delete_launch_template( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], aws_retry=True, ) except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template['LaunchTemplateId'])) + module.fail_json_aws(e, msg=f"Could not delete launch template {template['LaunchTemplateId']}") return { - 'deleted_versions': deleted_versions, - 'deleted_template': camel_dict_to_snake_dict(resp['LaunchTemplate']), - 'changed': True, + "deleted_versions": deleted_versions, + "deleted_template": camel_dict_to_snake_dict(resp["LaunchTemplate"]), + "changed": True, } else: - return {'changed': False} + return {"changed": False} def create_or_update(module, template_options): - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidLaunchTemplateId.NotFound'])) + ec2 = module.client( + "ec2", retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidLaunchTemplateId.NotFound"]) + ) template, template_versions = existing_templates(module) out = {} lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options)) lt_data = scrub_none_parameters(lt_data, descend_into_lists=True) - if lt_data.get('MetadataOptions'): - if not module.botocore_at_least('1.23.30'): - # fail only if enabled is requested - if lt_data['MetadataOptions'].get('InstanceMetadataTags') == 'enabled': - module.require_botocore_at_least('1.23.30', reason='to set instance_metadata_tags') - # pop if it's not requested to keep backwards compatibility. - # otherwise the modules failes because parameters are set due default values - lt_data['MetadataOptions'].pop('InstanceMetadataTags') - - if not module.botocore_at_least('1.21.29'): - # fail only if enabled is requested - if lt_data['MetadataOptions'].get('HttpProtocolIpv6') == 'enabled': - module.require_botocore_at_least('1.21.29', reason='to set http_protocol_ipv6') - # pop if it's not requested to keep backwards compatibility. 
- # otherwise the modules failes because parameters are set due default values - lt_data['MetadataOptions'].pop('HttpProtocolIpv6') - if not (template or template_versions): # create a full new one try: resp = ec2.create_launch_template( - LaunchTemplateName=module.params['template_name'], + LaunchTemplateName=module.params["template_name"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, aws_retry=True, @@ -577,26 +592,26 @@ def create_or_update(module, template_options): except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create launch template") template, template_versions = existing_templates(module) - out['changed'] = True + out["changed"] = True elif template and template_versions: most_recent = sorted(template_versions, key=lambda x: x["VersionNumber"])[-1] if lt_data == most_recent["LaunchTemplateData"] and module.params["version_description"] == most_recent.get( "VersionDescription", "" ): - out['changed'] = False + out["changed"] = False return out try: - if module.params.get('source_version') in (None, ''): + if module.params.get("source_version") in (None, ""): resp = ec2.create_launch_template_version( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, VersionDescription=str(module.params["version_description"]), aws_retry=True, ) - elif module.params.get('source_version') == 'latest': + elif module.params.get("source_version") == "latest": resp = ec2.create_launch_template_version( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, SourceVersion=str(most_recent["VersionNumber"]), @@ -605,15 +620,22 @@ def create_or_update(module, template_options): ) else: try: - int(module.params.get('source_version')) + int(module.params.get("source_version")) except ValueError: - module.fail_json(msg='source_version param was not a valid integer, got "{0}"'.format(module.params.get('source_version'))) + module.fail_json( + msg=f"source_version param was not a valid integer, got \"{module.params.get('source_version')}\"" + ) # get source template version - source_version = next((v for v in template_versions if v['VersionNumber'] == int(module.params.get('source_version'))), None) + source_version = next( + (v for v in template_versions if v["VersionNumber"] == int(module.params.get("source_version"))), + None, + ) if source_version is None: - module.fail_json(msg='source_version does not exist, got "{0}"'.format(module.params.get('source_version'))) + module.fail_json( + msg=f"source_version does not exist, got \"{module.params.get('source_version')}\"" + ) resp = ec2.create_launch_template_version( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, SourceVersion=str(source_version["VersionNumber"]), @@ -621,31 +643,33 @@ def create_or_update(module, template_options): aws_retry=True, ) - if module.params.get('default_version') in (None, ''): + if module.params.get("default_version") in (None, ""): # no need to do anything, leave the existing version as default pass - elif module.params.get('default_version') == 'latest': + elif module.params.get("default_version") == "latest": set_default = ec2.modify_launch_template( - LaunchTemplateId=template['LaunchTemplateId'], - DefaultVersion=to_text(resp['LaunchTemplateVersion']['VersionNumber']), + 
LaunchTemplateId=template["LaunchTemplateId"], + DefaultVersion=to_text(resp["LaunchTemplateVersion"]["VersionNumber"]), ClientToken=uuid4().hex, aws_retry=True, ) else: try: - int(module.params.get('default_version')) + int(module.params.get("default_version")) except ValueError: - module.fail_json(msg='default_version param was not a valid integer, got "{0}"'.format(module.params.get('default_version'))) + module.fail_json( + msg=f"default_version param was not a valid integer, got \"{module.params.get('default_version')}\"" + ) set_default = ec2.modify_launch_template( - LaunchTemplateId=template['LaunchTemplateId'], - DefaultVersion=to_text(int(module.params.get('default_version'))), + LaunchTemplateId=template["LaunchTemplateId"], + DefaultVersion=to_text(int(module.params.get("default_version"))), ClientToken=uuid4().hex, aws_retry=True, ) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create subsequent launch template version") template, template_versions = existing_templates(module) - out['changed'] = True + out["changed"] = True return out @@ -655,43 +679,38 @@ def format_module_output(module): template = camel_dict_to_snake_dict(template) template_versions = [camel_dict_to_snake_dict(v) for v in template_versions] for v in template_versions: - for ts in (v['launch_template_data'].get('tag_specifications') or []): - ts['tags'] = boto3_tag_list_to_ansible_dict(ts.pop('tags')) + for ts in v["launch_template_data"].get("tag_specifications") or []: + ts["tags"] = boto3_tag_list_to_ansible_dict(ts.pop("tags")) output.update(dict(template=template, versions=template_versions)) - output['default_template'] = [ - v for v in template_versions - if v.get('default_version') - ][0] - output['latest_template'] = [ - v for v in template_versions - if ( - v.get('version_number') and - int(v['version_number']) == int(template['latest_version_number']) - ) + output["default_template"] = [v for v in template_versions if v.get("default_version")][0] + output["latest_template"] = [ + v + for v in template_versions + if (v.get("version_number") and int(v["version_number"]) == int(template["latest_version_number"])) ][0] - if "version_number" in output['default_template']: - output['default_version'] = output['default_template']['version_number'] - if "version_number" in output['latest_template']: - output['latest_version'] = output['latest_template']['version_number'] + if "version_number" in output["default_template"]: + output["default_version"] = output["default_template"]["version_number"] + if "version_number" in output["latest_template"]: + output["latest_version"] = output["latest_template"]["version_number"] return output def main(): template_options = dict( block_device_mappings=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( device_name=dict(), ebs=dict( - type='dict', + type="dict", options=dict( - delete_on_termination=dict(type='bool'), - encrypted=dict(type='bool'), - iops=dict(type='int'), + delete_on_termination=dict(type="bool"), + encrypted=dict(type="bool"), + iops=dict(type="int"), kms_key_id=dict(), snapshot_id=dict(), - volume_size=dict(type='int'), + volume_size=dict(type="int"), volume_type=dict(), ), ), @@ -700,39 +719,39 @@ def main(): ), ), cpu_options=dict( - type='dict', + type="dict", options=dict( - core_count=dict(type='int'), - threads_per_core=dict(type='int'), + core_count=dict(type="int"), + threads_per_core=dict(type="int"), ), ), credit_specification=dict( - dict(type='dict'), + 
dict(type="dict"), options=dict( cpu_credits=dict(), ), ), - disable_api_termination=dict(type='bool'), - ebs_optimized=dict(type='bool'), + disable_api_termination=dict(type="bool"), + ebs_optimized=dict(type="bool"), elastic_gpu_specifications=dict( options=dict(type=dict()), - type='list', - elements='dict', + type="list", + elements="dict", ), iam_instance_profile=dict(), image_id=dict(), - instance_initiated_shutdown_behavior=dict(choices=['stop', 'terminate']), + instance_initiated_shutdown_behavior=dict(choices=["stop", "terminate"]), instance_market_options=dict( - type='dict', + type="dict", options=dict( market_type=dict(), spot_options=dict( - type='dict', + type="dict", options=dict( - block_duration_minutes=dict(type='int'), - instance_interruption_behavior=dict(choices=['hibernate', 'stop', 'terminate']), + block_duration_minutes=dict(type="int"), + instance_interruption_behavior=dict(choices=["hibernate", "stop", "terminate"]), max_price=dict(), - spot_instance_type=dict(choices=['one-time', 'persistent']), + spot_instance_type=dict(choices=["one-time", "persistent"]), ), ), ), @@ -741,32 +760,30 @@ def main(): kernel_id=dict(), key_name=dict(), monitoring=dict( - type='dict', - options=dict( - enabled=dict(type='bool') - ), + type="dict", + options=dict(enabled=dict(type="bool")), ), metadata_options=dict( - type='dict', + type="dict", options=dict( - http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'), - http_put_response_hop_limit=dict(type='int', default=1), - http_tokens=dict(choices=['optional', 'required'], default='optional'), - http_protocol_ipv6=dict(choices=['disabled', 'enabled'], default='disabled'), - instance_metadata_tags=dict(choices=['disabled', 'enabled'], default='disabled'), - ) + http_endpoint=dict(choices=["enabled", "disabled"], default="enabled"), + http_put_response_hop_limit=dict(type="int", default=1), + http_tokens=dict(choices=["optional", "required"], default="optional"), + http_protocol_ipv6=dict(choices=["disabled", "enabled"], default="disabled"), + instance_metadata_tags=dict(choices=["disabled", "enabled"], default="disabled"), + ), ), network_interfaces=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( - associate_public_ip_address=dict(type='bool'), - delete_on_termination=dict(type='bool'), + associate_public_ip_address=dict(type="bool"), + delete_on_termination=dict(type="bool"), description=dict(), - device_index=dict(type='int'), - groups=dict(type='list', elements='str'), - ipv6_address_count=dict(type='int'), - ipv6_addresses=dict(type='list', elements='str'), + device_index=dict(type="int"), + groups=dict(type="list", elements="str"), + ipv6_address_count=dict(type="int"), + ipv6_addresses=dict(type="list", elements="str"), network_interface_id=dict(), private_ip_address=dict(), subnet_id=dict(), @@ -780,12 +797,12 @@ def main(): host_id=dict(), tenancy=dict(), ), - type='dict', + type="dict", ), ram_disk_id=dict(), - security_group_ids=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - tags=dict(type='dict', aliases=['resource_tags']), + security_group_ids=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + tags=dict(type="dict", aliases=["resource_tags"]), user_data=dict(), ) @@ -803,25 +820,25 @@ def main(): module = AnsibleAWSModule( argument_spec=arg_spec, required_one_of=[ - ('template_name', 'template_id') + ("template_name", "template_id"), ], - supports_check_mode=True + 
supports_check_mode=True, ) - for interface in (module.params.get('network_interfaces') or []): - if interface.get('ipv6_addresses'): - interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']] + for interface in module.params.get("network_interfaces") or []: + if interface.get("ipv6_addresses"): + interface["ipv6_addresses"] = [{"ipv6_address": x} for x in interface["ipv6_addresses"]] - if module.params.get('state') == 'present': + if module.params.get("state") == "present": out = create_or_update(module, template_options) out.update(format_module_output(module)) - elif module.params.get('state') == 'absent': + elif module.params.get("state") == "absent": out = delete_template(module) else: - module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get('state'))) + module.fail_json(msg=f"Unsupported value \"{module.params.get('state')}\" for `state` parameter") module.exit_json(**out) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py b/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py index c27917df9..3cdb5be21 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py @@ -1,22 +1,21 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_placement_group version_added: 1.0.0 short_description: Create or delete an EC2 Placement Group description: - - Create an EC2 Placement Group; if the placement group already exists, - nothing is done. Or, delete an existing placement group. If the placement - group is absent, do nothing. See also - U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) -author: "Brad Macpherson (@iiibrad)" + - Create an EC2 Placement Group; if the placement group already exists, + nothing is done. Or, delete an existing placement group. If the placement + group is absent, do nothing. See also + U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +author: + - "Brad Macpherson (@iiibrad)" options: name: description: @@ -45,12 +44,12 @@ options: choices: [ 'cluster', 'spread', 'partition' ] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide # for details. 
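# A minimal additional sketch (name and count are placeholders): the partition
# strategy must be selected for partition_count to be accepted.
- name: Create a partition placement group with four partitions
  community.aws.ec2_placement_group:
    name: my-partition-group
    partition_count: 4
    strategy: partition
    state: present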
@@ -76,11 +75,9 @@ EXAMPLES = ''' community.aws.ec2_placement_group: name: my-cluster state: absent +""" -''' - - -RETURN = ''' +RETURN = r""" placement_group: description: Placement group attributes returned: when state != absent @@ -98,17 +95,17 @@ placement_group: description: PG strategy type: str sample: "cluster" - -''' +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule @AWSRetry.exponential_backoff() @@ -118,40 +115,32 @@ def search_placement_group(connection, module): """ name = module.params.get("name") try: - response = connection.describe_placement_groups( - Filters=[{ - "Name": "group-name", - "Values": [name] - }]) + response = connection.describe_placement_groups(Filters=[{"Name": "group-name", "Values": [name]}]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - msg="Couldn't find placement group named [%s]" % name) + module.fail_json_aws(e, msg=f"Couldn't find placement group named [{name}]") - if len(response['PlacementGroups']) != 1: + if len(response["PlacementGroups"]) != 1: return None else: - placement_group = response['PlacementGroups'][0] + placement_group = response["PlacementGroups"][0] return { - "name": placement_group['GroupName'], - "state": placement_group['State'], - "strategy": placement_group['Strategy'], + "name": placement_group["GroupName"], + "state": placement_group["State"], + "strategy": placement_group["Strategy"], } -@AWSRetry.exponential_backoff(catch_extra_error_codes=['InvalidPlacementGroup.Unknown']) +@AWSRetry.exponential_backoff(catch_extra_error_codes=["InvalidPlacementGroup.Unknown"]) def get_placement_group_information(connection, name): """ Retrieve information about a placement group. 
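Returns a dict with the group's name, state, and strategy.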
""" - response = connection.describe_placement_groups( - GroupNames=[name] - ) - placement_group = response['PlacementGroups'][0] + response = connection.describe_placement_groups(GroupNames=[name]) + placement_group = response["PlacementGroups"][0] return { - "name": placement_group['GroupName'], - "state": placement_group['State'], - "strategy": placement_group['Strategy'], + "name": placement_group["GroupName"], + "state": placement_group["State"], + "strategy": placement_group["Strategy"], } @@ -161,32 +150,34 @@ def create_placement_group(connection, module): strategy = module.params.get("strategy") partition_count = module.params.get("partition_count") - if strategy != 'partition' and partition_count: - module.fail_json( - msg="'partition_count' can only be set when strategy is set to 'partition'.") + if strategy != "partition" and partition_count: + module.fail_json(msg="'partition_count' can only be set when strategy is set to 'partition'.") params = {} - params['GroupName'] = name - params['Strategy'] = strategy + params["GroupName"] = name + params["Strategy"] = strategy if partition_count: - params['PartitionCount'] = partition_count - params['DryRun'] = module.check_mode + params["PartitionCount"] = partition_count + params["DryRun"] = module.check_mode try: connection.create_placement_group(**params) - except is_boto3_error_code('DryRunOperation'): - module.exit_json(changed=True, placement_group={ - "name": name, - "state": 'DryRun', - "strategy": strategy, - }) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, - msg="Couldn't create placement group [%s]" % name) - - module.exit_json(changed=True, - placement_group=get_placement_group_information(connection, name)) + except is_boto3_error_code("DryRunOperation"): + module.exit_json( + changed=True, + placement_group={ + "name": name, + "state": "DryRun", + "strategy": strategy, + }, + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Couldn't create placement group [{name}]") + + module.exit_json(changed=True, placement_group=get_placement_group_information(connection, name)) @AWSRetry.exponential_backoff() @@ -194,52 +185,42 @@ def delete_placement_group(connection, module): name = module.params.get("name") try: - connection.delete_placement_group( - GroupName=name, DryRun=module.check_mode) + connection.delete_placement_group(GroupName=name, DryRun=module.check_mode) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - msg="Couldn't delete placement group [%s]" % name) + module.fail_json_aws(e, msg=f"Couldn't delete placement group [{name}]") module.exit_json(changed=True) def main(): argument_spec = dict( - name=dict(required=True, type='str'), - partition_count=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - strategy=dict(default='cluster', choices=['cluster', 'spread', 'partition']) + name=dict(required=True, type="str"), + partition_count=dict(type="int"), + state=dict(default="present", choices=["present", "absent"]), + strategy=dict(default="cluster", choices=["cluster", "spread", "partition"]), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2') + connection = 
module.client("ec2") state = module.params.get("state") - if state == 'present': + if state == "present": placement_group = search_placement_group(connection, module) if placement_group is None: create_placement_group(connection, module) else: strategy = module.params.get("strategy") - if placement_group['strategy'] == strategy: - module.exit_json( - changed=False, placement_group=placement_group) + if placement_group["strategy"] == strategy: + module.exit_json(changed=False, placement_group=placement_group) else: name = module.params.get("name") module.fail_json( - msg=("Placement group '{}' exists, can't change strategy" + - " from '{}' to '{}'").format( - name, - placement_group['strategy'], - strategy)) + msg=f"Placement group '{name}' exists, can't change strategy from '{placement_group['strategy']}' to '{strategy}'" + ) - elif state == 'absent': + elif state == "absent": placement_group = search_placement_group(connection, module) if placement_group is None: module.exit_json(changed=False) @@ -247,5 +228,5 @@ def main(): delete_placement_group(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py b/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py index d22f133ae..05b37488c 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_placement_group_info version_added: 1.0.0 short_description: List EC2 Placement Group(s) details description: - - List details of EC2 Placement Group(s). -author: "Brad Macpherson (@iiibrad)" + - List details of EC2 Placement Group(s). +author: + - "Brad Macpherson (@iiibrad)" options: names: description: @@ -24,13 +23,12 @@ options: required: false default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details or the AWS region, # see the AWS Guide for details. @@ -41,18 +39,17 @@ EXAMPLES = r''' - name: List two placement groups. 
community.aws.ec2_placement_group_info: names: - - my-cluster - - my-other-cluster + - my-cluster + - my-other-cluster register: specific_ec2_placement_groups - ansible.builtin.debug: msg: > {{ specific_ec2_placement_groups | json_query("[?name=='my-cluster']") }} +""" -''' - -RETURN = r''' +RETURN = r""" placement_groups: description: Placement group attributes returned: always @@ -70,57 +67,61 @@ placement_groups: description: PG strategy type: str sample: "cluster" +""" -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule try: - from botocore.exceptions import (BotoCoreError, ClientError) + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def get_placement_groups_details(connection, module): names = module.params.get("names") try: if len(names) > 0: response = connection.describe_placement_groups( - Filters=[{ - "Name": "group-name", - "Values": names - }]) + Filters=[ + { + "Name": "group-name", + "Values": names, + } + ] + ) else: response = connection.describe_placement_groups() except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, - msg="Couldn't find placement groups named [%s]" % names) + module.fail_json_aws(e, msg=f"Couldn't find placement groups named [{names}]") results = [] - for placement_group in response['PlacementGroups']: - results.append({ - "name": placement_group['GroupName'], - "state": placement_group['State'], - "strategy": placement_group['Strategy'], - }) + for placement_group in response["PlacementGroups"]: + results.append( + { + "name": placement_group["GroupName"], + "state": placement_group["State"], + "strategy": placement_group["Strategy"], + } + ) return results def main(): argument_spec = dict( - names=dict(type='list', default=[], elements='str') + names=dict(type="list", default=[], elements="str"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) - connection = module.client('ec2') + connection = module.client("ec2") placement_groups = get_placement_groups_details(connection, module) module.exit_json(changed=False, placement_groups=placement_groups) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py b/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py index f45be4417..2cf994caa 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_snapshot_copy version_added: 1.0.0 @@ -57,12 +54,12 @@ options: author: - Deepak Kothandan (@Deepakkothandan) <deepak.kdy@gmail.com> extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Basic Snapshot Copy community.aws.ec2_snapshot_copy: source_region: eu-central-1 @@ -84,7 +81,7 @@ 
EXAMPLES = ''' region: eu-west-1 source_snapshot_id: snap-xxxxxxx tags: - Name: Snapshot-Name + Name: Snapshot-Name - name: Encrypted Snapshot copy community.aws.ec2_snapshot_copy: @@ -100,24 +97,25 @@ EXAMPLES = ''' source_snapshot_id: snap-xxxxxxx encrypted: true kms_key_id: arn:aws:kms:eu-central-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b -''' +""" -RETURN = ''' +RETURN = r""" snapshot_id: description: snapshot id of the newly created snapshot returned: when snapshot copy is successful type: str sample: "snap-e9095e8c" -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def copy_snapshot(module, ec2): """ @@ -128,34 +126,33 @@ def copy_snapshot(module, ec2): """ params = { - 'SourceRegion': module.params.get('source_region'), - 'SourceSnapshotId': module.params.get('source_snapshot_id'), - 'Description': module.params.get('description') + "SourceRegion": module.params.get("source_region"), + "SourceSnapshotId": module.params.get("source_snapshot_id"), + "Description": module.params.get("description"), } - if module.params.get('encrypted'): - params['Encrypted'] = True + if module.params.get("encrypted"): + params["Encrypted"] = True - if module.params.get('kms_key_id'): - params['KmsKeyId'] = module.params.get('kms_key_id') + if module.params.get("kms_key_id"): + params["KmsKeyId"] = module.params.get("kms_key_id") - if module.params.get('tags'): - params['TagSpecifications'] = boto3_tag_specifications(module.params.get('tags'), types=['snapshot']) + if module.params.get("tags"): + params["TagSpecifications"] = boto3_tag_specifications(module.params.get("tags"), types=["snapshot"]) try: - snapshot_id = ec2.copy_snapshot(**params)['SnapshotId'] - if module.params.get('wait'): + snapshot_id = ec2.copy_snapshot(**params)["SnapshotId"] + if module.params.get("wait"): delay = 15 # Add one to max_attempts as wait() increment # its counter before assessing it for time.sleep() - max_attempts = (module.params.get('wait_timeout') // delay) + 1 - ec2.get_waiter('snapshot_completed').wait( - SnapshotIds=[snapshot_id], - WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts) + max_attempts = (module.params.get("wait_timeout") // delay) + 1 + ec2.get_waiter("snapshot_completed").wait( + SnapshotIds=[snapshot_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts) ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='An error occurred waiting for the snapshot to become available.') + module.fail_json_aws(e, msg="An error occurred waiting for the snapshot to become available.") module.exit_json(changed=True, snapshot_id=snapshot_id) @@ -164,23 +161,23 @@ def main(): argument_spec = dict( source_region=dict(required=True), source_snapshot_id=dict(required=True), - description=dict(default=''), - encrypted=dict(type='bool', default=False, required=False), - kms_key_id=dict(type='str', required=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=600), - tags=dict(type='dict', aliases=['resource_tags']), + description=dict(default=""), + encrypted=dict(type="bool", default=False, required=False), + kms_key_id=dict(type="str", required=False), + 
wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=600), + tags=dict(type="dict", aliases=["resource_tags"]), ) module = AnsibleAWSModule(argument_spec=argument_spec) try: - client = module.client('ec2') + client = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") copy_snapshot(module, client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py index 298646cf8..19876984d 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: ec2_transit_gateway short_description: Create and delete AWS Transit Gateways version_added: 1.0.0 @@ -74,13 +72,13 @@ options: author: - "Bob Boldin (@BobBoldin)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 - amazon.aws.tags -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new transit gateway using defaults community.aws.ec2_transit_gateway: state: present @@ -93,9 +91,9 @@ EXAMPLES = ''' asn: 64514 auto_associate: false auto_propagate: false - dns_support: True + dns_support: true description: "nonprod transit gateway" - purge_tags: False + purge_tags: false state: present region: us-east-1 tags: @@ -114,9 +112,9 @@ EXAMPLES = ''' region: ap-southeast-2 transit_gateway_id: tgw-3a9aa123 register: deleted_tgw -''' +""" -RETURN = ''' +RETURN = r""" transit_gateway: description: The attributes of the transit gateway. 
type: complex @@ -210,49 +208,53 @@ transit_gateway: returned: always type: str sample: tgw-3a9aa123 -''' +""" + +from time import sleep +from time import time try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # handled by imported AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from time import sleep, time -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -class AnsibleEc2Tgw(object): +class AnsibleEc2Tgw(object): def __init__(self, module, results): self._module = module self._results = results retry_decorator = AWSRetry.jittered_backoff( - catch_extra_error_codes=['IncorrectState'], + catch_extra_error_codes=["IncorrectState"], ) - connection = module.client('ec2', retry_decorator=retry_decorator) + connection = module.client("ec2", retry_decorator=retry_decorator) self._connection = connection self._check_mode = self._module.check_mode def process(self): - """ Process the request based on state parameter . - state = present will search for an existing tgw based and return the object data. - if no object is found it will be created - - state = absent will attempt to remove the tgw however will fail if it still has - attachments or associations - """ - description = self._module.params.get('description') - state = self._module.params.get('state', 'present') - tgw_id = self._module.params.get('transit_gateway_id') - - if state == 'present': + """Process the request based on state parameter . + state = present will search for an existing tgw based and return the object data. 
+ if no object is found it will be created + + state = absent will attempt to remove the tgw however will fail if it still has + attachments or associations + """ + description = self._module.params.get("description") + state = self._module.params.get("state", "present") + tgw_id = self._module.params.get("transit_gateway_id") + + if state == "present": self.ensure_tgw_present(tgw_id, description) - elif state == 'absent': + elif state == "absent": self.ensure_tgw_absent(tgw_id, description) def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True): @@ -276,13 +278,13 @@ class AnsibleEc2Tgw(object): if transit_gateway: if self._check_mode: - transit_gateway['state'] = status + transit_gateway["state"] = status - if transit_gateway.get('state') == status: + if transit_gateway.get("state") == status: status_achieved = True break - elif transit_gateway.get('state') == 'failed': + elif transit_gateway.get("state") == "failed": break else: @@ -292,13 +294,12 @@ class AnsibleEc2Tgw(object): self._module.fail_json_aws(e) if not status_achieved: - self._module.fail_json( - msg="Wait time out reached, while waiting for results") + self._module.fail_json(msg="Wait time out reached, while waiting for results") return transit_gateway def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True): - """ search for an existing tgw by either tgw_id or description + """search for an existing tgw by either tgw_id or description :param tgw_id: The AWS id of the transit gateway :param description: The description of the transit gateway. :param skip_deleted: ignore deleted transit gateways @@ -306,7 +307,7 @@ class AnsibleEc2Tgw(object): """ filters = [] if tgw_id: - filters = ansible_dict_to_boto3_filter_list({'transit-gateway-id': tgw_id}) + filters = ansible_dict_to_boto3_filter_list({"transit-gateway-id": tgw_id}) try: response = AWSRetry.exponential_backoff()(self._connection.describe_transit_gateways)(Filters=filters) @@ -316,20 +317,21 @@ class AnsibleEc2Tgw(object): tgw = None tgws = [] - if len(response.get('TransitGateways', [])) == 1 and tgw_id: - if (response['TransitGateways'][0]['State'] != 'deleted') or not skip_deleted: - tgws.extend(response['TransitGateways']) + if len(response.get("TransitGateways", [])) == 1 and tgw_id: + if (response["TransitGateways"][0]["State"] != "deleted") or not skip_deleted: + tgws.extend(response["TransitGateways"]) - for gateway in response.get('TransitGateways', []): - if description == gateway['Description'] and gateway['State'] != 'deleted': + for gateway in response.get("TransitGateways", []): + if description == gateway["Description"] and gateway["State"] != "deleted": tgws.append(gateway) if len(tgws) > 1: self._module.fail_json( - msg='EC2 returned more than one transit Gateway for description {0}, aborting'.format(description)) + msg=f"EC2 returned more than one transit Gateway for description {description}, aborting" + ) elif tgws: - tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=['Tags']) - tgw['tags'] = boto3_tag_list_to_ansible_dict(tgws[0]['Tags']) + tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=["Tags"]) + tgw["tags"] = boto3_tag_list_to_ansible_dict(tgws[0]["Tags"]) return tgw @@ -349,31 +351,31 @@ class AnsibleEc2Tgw(object): :return dict: transit gateway object """ options = dict() - wait = self._module.params.get('wait') - wait_timeout = self._module.params.get('wait_timeout') + wait = self._module.params.get("wait") + wait_timeout = self._module.params.get("wait_timeout") - if self._module.params.get('asn'): 
- options['AmazonSideAsn'] = self._module.params.get('asn') + if self._module.params.get("asn"): + options["AmazonSideAsn"] = self._module.params.get("asn") - options['AutoAcceptSharedAttachments'] = self.enable_option_flag(self._module.params.get('auto_attach')) - options['DefaultRouteTableAssociation'] = self.enable_option_flag(self._module.params.get('auto_associate')) - options['DefaultRouteTablePropagation'] = self.enable_option_flag(self._module.params.get('auto_propagate')) - options['VpnEcmpSupport'] = self.enable_option_flag(self._module.params.get('vpn_ecmp_support')) - options['DnsSupport'] = self.enable_option_flag(self._module.params.get('dns_support')) + options["AutoAcceptSharedAttachments"] = self.enable_option_flag(self._module.params.get("auto_attach")) + options["DefaultRouteTableAssociation"] = self.enable_option_flag(self._module.params.get("auto_associate")) + options["DefaultRouteTablePropagation"] = self.enable_option_flag(self._module.params.get("auto_propagate")) + options["VpnEcmpSupport"] = self.enable_option_flag(self._module.params.get("vpn_ecmp_support")) + options["DnsSupport"] = self.enable_option_flag(self._module.params.get("dns_support")) try: response = self._connection.create_transit_gateway(Description=description, Options=options) except (ClientError, BotoCoreError) as e: self._module.fail_json_aws(e) - tgw_id = response['TransitGateway']['TransitGatewayId'] + tgw_id = response["TransitGateway"]["TransitGatewayId"] if wait: result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="available") else: result = self.get_matching_tgw(tgw_id=tgw_id) - self._results['msg'] = (' Transit gateway {0} created'.format(result['transit_gateway_id'])) + self._results["msg"] = f" Transit gateway {result['transit_gateway_id']} created" return result @@ -384,8 +386,8 @@ class AnsibleEc2Tgw(object): :param tgw_id: The id of the transit gateway :return dict: transit gateway object """ - wait = self._module.params.get('wait') - wait_timeout = self._module.params.get('wait_timeout') + wait = self._module.params.get("wait") + wait_timeout = self._module.params.get("wait_timeout") try: response = self._connection.delete_transit_gateway(TransitGatewayId=tgw_id) @@ -393,11 +395,13 @@ class AnsibleEc2Tgw(object): self._module.fail_json_aws(e) if wait: - result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False) + result = self.wait_for_status( + wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False + ) else: result = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=False) - self._results['msg'] = (' Transit gateway {0} deleted'.format(tgw_id)) + self._results["msg"] = f" Transit gateway {tgw_id} deleted" return result @@ -414,25 +418,27 @@ class AnsibleEc2Tgw(object): if tgw is None: if self._check_mode: - self._results['changed'] = True - self._results['transit_gateway_id'] = None + self._results["changed"] = True + self._results["transit_gateway_id"] = None return self._results try: if not description: self._module.fail_json(msg="Failed to create Transit Gateway: description argument required") tgw = self.create_tgw(description) - self._results['changed'] = True + self._results["changed"] = True except (BotoCoreError, ClientError) as e: - self._module.fail_json_aws(e, msg='Unable to create Transit Gateway') - - self._results['changed'] |= ensure_ec2_tags( - self._connection, self._module, tgw['transit_gateway_id'], - tags=self._module.params.get('tags'), - 
purge_tags=self._module.params.get('purge_tags'), + self._module.fail_json_aws(e, msg="Unable to create Transit Gateway") + + self._results["changed"] |= ensure_ec2_tags( + self._connection, + self._module, + tgw["transit_gateway_id"], + tags=self._module.params.get("tags"), + purge_tags=self._module.params.get("purge_tags"), ) - self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id']) + self._results["transit_gateway"] = self.get_matching_tgw(tgw_id=tgw["transit_gateway_id"]) return self._results @@ -444,21 +450,22 @@ class AnsibleEc2Tgw(object): :param description: The description of the transit gateway. :return doct: transit gateway object """ - self._results['transit_gateway_id'] = None + self._results["transit_gateway_id"] = None tgw = self.get_matching_tgw(tgw_id, description) if tgw is not None: if self._check_mode: - self._results['changed'] = True + self._results["changed"] = True return self._results try: - tgw = self.delete_tgw(tgw_id=tgw['transit_gateway_id']) - self._results['changed'] = True - self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'], - skip_deleted=False) + tgw = self.delete_tgw(tgw_id=tgw["transit_gateway_id"]) + self._results["changed"] = True + self._results["transit_gateway"] = self.get_matching_tgw( + tgw_id=tgw["transit_gateway_id"], skip_deleted=False + ) except (BotoCoreError, ClientError) as e: - self._module.fail_json_aws(e, msg='Unable to delete Transit Gateway') + self._module.fail_json_aws(e, msg="Unable to delete Transit Gateway") return self._results @@ -470,24 +477,24 @@ def setup_module_object(): """ argument_spec = dict( - asn=dict(type='int'), - auto_associate=dict(type='bool', default=True), - auto_attach=dict(type='bool', default=False), - auto_propagate=dict(type='bool', default=True), - description=dict(type='str'), - dns_support=dict(type='bool', default=True), - purge_tags=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='dict', aliases=['resource_tags']), - transit_gateway_id=dict(type='str'), - vpn_ecmp_support=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300) + asn=dict(type="int"), + auto_associate=dict(type="bool", default=True), + auto_attach=dict(type="bool", default=False), + auto_propagate=dict(type="bool", default=True), + description=dict(type="str"), + dns_support=dict(type="bool", default=True), + purge_tags=dict(type="bool", default=True), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(type="dict", aliases=["resource_tags"]), + transit_gateway_id=dict(type="str"), + vpn_ecmp_support=dict(type="bool", default=True), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=300), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_one_of=[('description', 'transit_gateway_id')], + required_one_of=[("description", "transit_gateway_id")], supports_check_mode=True, ) @@ -495,12 +502,9 @@ def setup_module_object(): def main(): - module = setup_module_object() - results = dict( - changed=False - ) + results = dict(changed=False) tgw_manager = AnsibleEc2Tgw(module=module, results=results) tgw_manager.process() @@ -508,5 +512,5 @@ def main(): module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py 
b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py index 5ce3dc6a4..b25346b84 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py @@ -1,19 +1,17 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_transit_gateway_info short_description: Gather information about ec2 transit gateways in AWS version_added: 1.0.0 description: - - Gather information about ec2 transit gateways in AWS -author: "Bob Boldin (@BobBoldin)" + - Gather information about ec2 transit gateways in AWS +author: + - "Bob Boldin (@BobBoldin)" options: transit_gateway_ids: description: @@ -29,13 +27,12 @@ options: type: dict default: {} extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather info about all transit gateways @@ -57,9 +54,9 @@ EXAMPLES = r''' transit_gateway_ids: - tgw-02c42332e6b7da829 - tgw-03c53443d5a8cb716 -''' +""" -RETURN = r''' +RETURN = r""" transit_gateways: description: > Transit gateways that match the provided filters. Each element consists of a dict with all the information @@ -162,7 +159,7 @@ transit_gateways: returned: always type: str sample: "tgw-02c42332e6b7da829" -''' +""" try: import botocore @@ -171,19 +168,19 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -class AnsibleEc2TgwInfo(object): +class AnsibleEc2TgwInfo(object): def __init__(self, module, results): self._module = module self._results = results - self._connection = self._module.client('ec2') + self._connection = self._module.client("ec2") self._check_mode = self._module.check_mode @AWSRetry.exponential_backoff() @@ -195,8 +192,8 @@ class AnsibleEc2TgwInfo(object): connection : boto3 client connection object """ # collect parameters - filters = ansible_dict_to_boto3_filter_list(self._module.params['filters']) - transit_gateway_ids = self._module.params['transit_gateway_ids'] + filters = 
ansible_dict_to_boto3_filter_list(self._module.params["filters"]) + transit_gateway_ids = self._module.params["transit_gateway_ids"] # init empty list for return vars transit_gateway_info = list() @@ -204,17 +201,18 @@ class AnsibleEc2TgwInfo(object): # Get the basic transit gateway info try: response = self._connection.describe_transit_gateways( - TransitGatewayIds=transit_gateway_ids, Filters=filters) - except is_boto3_error_code('InvalidTransitGatewayID.NotFound'): - self._results['transit_gateways'] = [] + TransitGatewayIds=transit_gateway_ids, Filters=filters + ) + except is_boto3_error_code("InvalidTransitGatewayID.NotFound"): + self._results["transit_gateways"] = [] return - for transit_gateway in response['TransitGateways']: - transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=['Tags'])) + for transit_gateway in response["TransitGateways"]: + transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=["Tags"])) # convert tag list to ansible dict - transit_gateway_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(transit_gateway.get('Tags', [])) + transit_gateway_info[-1]["tags"] = boto3_tag_list_to_ansible_dict(transit_gateway.get("Tags", [])) - self._results['transit_gateways'] = transit_gateway_info + self._results["transit_gateways"] = transit_gateway_info return @@ -225,8 +223,8 @@ def setup_module_object(): """ argument_spec = dict( - transit_gateway_ids=dict(type='list', default=[], elements='str', aliases=['transit_gateway_id']), - filters=dict(type='dict', default={}) + transit_gateway_ids=dict(type="list", default=[], elements="str", aliases=["transit_gateway_id"]), + filters=dict(type="dict", default={}), ) module = AnsibleAWSModule( @@ -238,12 +236,9 @@ def setup_module_object(): def main(): - module = setup_module_object() - results = dict( - changed=False - ) + results = dict(changed=False) tgwf_manager = AnsibleEc2TgwInfo(module=module, results=results) try: @@ -254,5 +249,5 @@ def main(): module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py index 554059021..cfb6809a8 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: ec2_transit_gateway_vpc_attachment short_description: Create and delete AWS Transit Gateway VPC attachments version_added: 4.0.0 @@ -98,26 +96,26 @@ options: author: - "Mark Chappell (@tremble)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create a Transit Gateway attachment - community.aws.ec2_transit_gateway_vpc_attachment: state: present transit_gateway: 'tgw-123456789abcdef01' name: AnsibleTest-1 subnets: - - subnet-00000000000000000 - - subnet-11111111111111111 - - subnet-22222222222222222 - ipv6_support: True - 
purge_subnets: True - dns_support: True - appliance_mode_support: True + - subnet-00000000000000000 + - subnet-11111111111111111 + - subnet-22222222222222222 + ipv6_support: true + purge_subnets: true + dns_support: true + appliance_mode_support: true tags: TestTag: changed data in Test Tag @@ -126,18 +124,18 @@ EXAMPLES = ''' state: present id: 'tgw-attach-0c0c5fd0b0f01d1c9' name: AnsibleTest-1 - ipv6_support: True - purge_subnets: False - dns_support: False - appliance_mode_support: True + ipv6_support: true + purge_subnets: false + dns_support: false + appliance_mode_support: true # Delete the transit gateway - community.aws.ec2_transit_gateway_vpc_attachment: state: absent id: 'tgw-attach-0c0c5fd0b0f01d1c9' -''' +""" -RETURN = ''' +RETURN = r""" transit_gateway_attachments: description: The attributes of the Transit Gateway attachments. type: list @@ -216,34 +214,31 @@ transit_gateway_attachments: type: str returned: success example: '123456789012' -''' - - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +""" +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager def main(): - argument_spec = dict( - state=dict(type='str', required=False, default='present', choices=['absent', 'present']), - transit_gateway=dict(type='str', required=False, aliases=['transit_gateway_id']), - id=dict(type='str', required=False, aliases=['attachment_id']), - name=dict(type='str', required=False), - subnets=dict(type='list', elements='str', required=False), - purge_subnets=dict(type='bool', required=False, default=True), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - appliance_mode_support=dict(type='bool', required=False), - dns_support=dict(type='bool', required=False), - ipv6_support=dict(type='bool', required=False), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), + state=dict(type="str", required=False, default="present", choices=["absent", "present"]), + transit_gateway=dict(type="str", required=False, aliases=["transit_gateway_id"]), + id=dict(type="str", required=False, aliases=["attachment_id"]), + name=dict(type="str", required=False), + subnets=dict(type="list", elements="str", required=False), + purge_subnets=dict(type="bool", required=False, default=True), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + appliance_mode_support=dict(type="bool", required=False), + dns_support=dict(type="bool", required=False), + ipv6_support=dict(type="bool", required=False), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), ) one_of = [ - ['id', 'transit_gateway', 'name'], + ["id", "transit_gateway", "name"], ] module = AnsibleAWSModule( @@ -252,55 +247,68 @@ def main(): required_one_of=one_of, ) - attach_id = module.params.get('id', None) - tgw = module.params.get('transit_gateway', None) - name = module.params.get('name', None) - tags = module.params.get('tags', None) - purge_tags = module.params.get('purge_tags') - state = module.params.get('state') - subnets = module.params.get('subnets', None) - purge_subnets = module.params.get('purge_subnets') + attach_id = module.params.get("id", None) + tgw = 
module.params.get("transit_gateway", None) + name = module.params.get("name", None) + tags = module.params.get("tags", None) + purge_tags = module.params.get("purge_tags") + state = module.params.get("state") + subnets = module.params.get("subnets", None) + purge_subnets = module.params.get("purge_subnets") # When not provided with an ID see if one exists. if not attach_id: search_manager = TransitGatewayVpcAttachmentManager(module=module) filters = dict() if tgw: - filters['transit-gateway-id'] = tgw + filters["transit-gateway-id"] = tgw if name: - filters['tag:Name'] = name + filters["tag:Name"] = name if subnets: vpc_id = search_manager.subnets_to_vpc(subnets) - filters['vpc-id'] = vpc_id + filters["vpc-id"] = vpc_id # Attachments lurk in a 'deleted' state, for a while, ignore them so we # can reuse the names - filters['state'] = [ - 'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying', - 'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting' + filters["state"] = [ + "available", + "deleting", + "failed", + "failing", + "initiatingRequest", + "modifying", + "pendingAcceptance", + "pending", + "rollingBack", + "rejected", + "rejecting", ] attachments = search_manager.list(filters=filters) if len(attachments) > 1: - module.fail_json('Multiple matching attachments found, provide an ID', attachments=attachments) + module.fail_json("Multiple matching attachments found, provide an ID", attachments=attachments) # If we find a match then we'll modify it by ID, otherwise we'll be # creating a new RTB. if attachments: - attach_id = attachments[0]['transit_gateway_attachment_id'] + attach_id = attachments[0]["transit_gateway_attachment_id"] manager = TransitGatewayVpcAttachmentManager(module=module, id=attach_id) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - if state == 'absent': + if state == "absent": manager.delete() else: if not attach_id: if not tgw: - module.fail_json('No existing attachment found. To create a new attachment' - ' the `transit_gateway` parameter must be provided.') + module.fail_json( + "No existing attachment found. To create a new attachment" + " the `transit_gateway` parameter must be provided." + ) if not subnets: - module.fail_json('No existing attachment found. To create a new attachment' - ' the `subnets` parameter must be provided.') + module.fail_json( + "No existing attachment found. To create a new attachment" + " the `subnets` parameter must be provided." + ) # name is just a special case of tags. 
if name: @@ -314,9 +322,9 @@ def main(): manager.set_transit_gateway(tgw) manager.set_subnets(subnets, purge_subnets) manager.set_tags(tags, purge_tags) - manager.set_dns_support(module.params.get('dns_support', None)) - manager.set_ipv6_support(module.params.get('ipv6_support', None)) - manager.set_appliance_mode_support(module.params.get('appliance_mode_support', None)) + manager.set_dns_support(module.params.get("dns_support", None)) + manager.set_ipv6_support(module.params.get("ipv6_support", None)) + manager.set_appliance_mode_support(module.params.get("appliance_mode_support", None)) manager.flush_changes() results = dict( @@ -324,7 +332,7 @@ def main(): attachments=[manager.updated_resource], ) if manager.changed: - results['diff'] = dict( + results["diff"] = dict( before=manager.original_resource, after=manager.updated_resource, ) @@ -332,5 +340,5 @@ def main(): module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py index b76b0b0f7..a665e4080 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: ec2_transit_gateway_vpc_attachment_info short_description: describes AWS Transit Gateway VPC attachments version_added: 4.0.0 @@ -39,14 +37,15 @@ options: type: bool required: false default: false -author: "Mark Chappell (@tremble)" +author: + - "Mark Chappell (@tremble)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Describe a specific Transit Gateway attachment. - community.aws.ec2_transit_gateway_vpc_attachment_info: id: 'tgw-attach-0123456789abcdef0' @@ -60,9 +59,9 @@ EXAMPLES = ''' - community.aws.ec2_transit_gateway_vpc_attachment_info: filters: transit-gateway-id: tgw-0fedcba9876543210' -''' +""" -RETURN = ''' +RETURN = r""" transit_gateway_attachments: description: The attributes of the Transit Gateway attachments. 
type: list @@ -141,26 +140,23 @@ transit_gateway_attachments: type: str returned: success example: '123456789012' -''' - - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +""" +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager def main(): - argument_spec = dict( - id=dict(type='str', required=False, aliases=['attachment_id']), - name=dict(type='str', required=False), - filters=dict(type='dict', required=False), - include_deleted=dict(type='bool', required=False, default=False) + id=dict(type="str", required=False, aliases=["attachment_id"]), + name=dict(type="str", required=False), + filters=dict(type="dict", required=False), + include_deleted=dict(type="bool", required=False, default=False), ) mutually_exclusive = [ - ['id', 'name'], - ['id', 'filters'], + ["id", "name"], + ["id", "filters"], ] module = AnsibleAWSModule( @@ -168,22 +164,31 @@ def main(): supports_check_mode=True, ) - name = module.params.get('name', None) - id = module.params.get('id', None) - opt_filters = module.params.get('filters', None) + name = module.params.get("name", None) + id = module.params.get("id", None) + opt_filters = module.params.get("filters", None) search_manager = TransitGatewayVpcAttachmentManager(module=module) filters = dict() if name: - filters['tag:Name'] = name + filters["tag:Name"] = name - if not module.params.get('include_deleted'): + if not module.params.get("include_deleted"): # Attachments lurk in a 'deleted' state, for a while, ignore them so we # can reuse the names - filters['state'] = [ - 'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying', - 'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting' + filters["state"] = [ + "available", + "deleting", + "failed", + "failing", + "initiatingRequest", + "modifying", + "pendingAcceptance", + "pending", + "rollingBack", + "rejected", + "rejecting", ] if opt_filters: @@ -194,5 +199,5 @@ def main(): module.exit_json(changed=False, attachments=attachments, filters=filters) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py index dbcf15b12..1bd65f501 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_egress_igw version_added: 1.0.0 short_description: Manage an AWS VPC Egress Only Internet gateway description: - - Manage an AWS VPC Egress Only Internet gateway -author: Daniel Shepherd (@shepdelacreme) + - Manage an AWS VPC Egress Only Internet gateway +author: + - Daniel Shepherd (@shepdelacreme) options: vpc_id: description: @@ -27,13 +26,12 @@ options: choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 
+""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Ensure that the VPC has an Internet Gateway. @@ -42,10 +40,9 @@ EXAMPLES = ''' vpc_id: vpc-abcdefgh state: present register: eigw +""" -''' - -RETURN = ''' +RETURN = r""" gateway_id: description: The ID of the Egress Only Internet Gateway or Null. returned: always @@ -56,7 +53,7 @@ vpc_id: returned: always type: str sample: vpc-012345678 -''' +""" try: import botocore @@ -65,9 +62,10 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def delete_eigw(module, connection, eigw_id): @@ -82,16 +80,18 @@ def delete_eigw(module, connection, eigw_id): try: response = connection.delete_egress_only_internet_gateway( - aws_retry=True, - DryRun=module.check_mode, - EgressOnlyInternetGatewayId=eigw_id) - except is_boto3_error_code('DryRunOperation'): + aws_retry=True, DryRun=module.check_mode, EgressOnlyInternetGatewayId=eigw_id + ) + except is_boto3_error_code("DryRunOperation"): changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Could not delete Egress-Only Internet Gateway {eigw_id} from VPC {module.vpc_id}") if not module.check_mode: - changed = response.get('ReturnCode', False) + changed = response.get("ReturnCode", False) return changed @@ -109,29 +109,33 @@ def create_eigw(module, connection, vpc_id): try: response = connection.create_egress_only_internet_gateway( - aws_retry=True, - DryRun=module.check_mode, - VpcId=vpc_id) - except is_boto3_error_code('DryRunOperation'): + aws_retry=True, DryRun=module.check_mode, VpcId=vpc_id + ) + except is_boto3_error_code("DryRunOperation"): # When boto3 method is run with DryRun=True it returns an error on success # We need to catch the error and return something valid changed = True - except is_boto3_error_code('InvalidVpcID.NotFound') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id)) + except is_boto3_error_code("InvalidVpcID.NotFound") as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"invalid vpc ID '{vpc_id}' provided") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Could not create Egress-Only Internet Gateway for vpc ID 
{vpc_id}") if not module.check_mode: - gateway = response.get('EgressOnlyInternetGateway', {}) - state = gateway.get('Attachments', [{}])[0].get('State') - gateway_id = gateway.get('EgressOnlyInternetGatewayId') + gateway = response.get("EgressOnlyInternetGateway", {}) + state = gateway.get("Attachments", [{}])[0].get("State") + gateway_id = gateway.get("EgressOnlyInternetGatewayId") - if gateway_id and state in ('attached', 'attaching'): + if gateway_id and state in ("attached", "attaching"): changed = True else: # EIGW gave back a bad attachment state or an invalid response so we error out - module.fail_json(msg='Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. Bad or no state in response'.format(vpc_id), - **camel_dict_to_snake_dict(response)) + module.fail_json( + msg=f"Unable to create and attach Egress Only Internet Gateway to VPCId: {vpc_id}. Bad or no state in response", + **camel_dict_to_snake_dict(response), + ) return changed, gateway_id @@ -147,45 +151,41 @@ def describe_eigws(module, connection, vpc_id): gateway_id = None try: - response = connection.describe_egress_only_internet_gateways( - aws_retry=True) + response = connection.describe_egress_only_internet_gateways(aws_retry=True) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Could not get list of existing Egress-Only Internet Gateways") - for eigw in response.get('EgressOnlyInternetGateways', []): - for attachment in eigw.get('Attachments', []): - if attachment.get('VpcId') == vpc_id and attachment.get('State') in ('attached', 'attaching'): - gateway_id = eigw.get('EgressOnlyInternetGatewayId') + for eigw in response.get("EgressOnlyInternetGateways", []): + for attachment in eigw.get("Attachments", []): + if attachment.get("VpcId") == vpc_id and attachment.get("State") in ("attached", "attaching"): + gateway_id = eigw.get("EgressOnlyInternetGatewayId") return gateway_id def main(): - argument_spec = dict( - vpc_id=dict(required=True), - state=dict(default='present', choices=['present', 'absent']) - ) + argument_spec = dict(vpc_id=dict(required=True), state=dict(default="present", choices=["present", "absent"])) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection = module.client('ec2', retry_decorator=retry_decorator) + connection = module.client("ec2", retry_decorator=retry_decorator) - vpc_id = module.params.get('vpc_id') - state = module.params.get('state') + vpc_id = module.params.get("vpc_id") + state = module.params.get("state") eigw_id = describe_eigws(module, connection, vpc_id) result = dict(gateway_id=eigw_id, vpc_id=vpc_id) changed = False - if state == 'present' and not eigw_id: - changed, result['gateway_id'] = create_eigw(module, connection, vpc_id) - elif state == 'absent' and eigw_id: + if state == "present" and not eigw_id: + changed, result["gateway_id"] = create_eigw(module, connection, vpc_id) + elif state == "absent" and eigw_id: changed = delete_eigw(module, connection, eigw_id) module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py index e11df3de5..cf109de1c 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py @@ -1,12 +1,10 
@@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_vpc_nacl short_description: create and delete Network ACLs version_added: 1.0.0 @@ -73,18 +71,18 @@ options: type: str choices: ['present', 'absent'] default: present -author: Mike Mochan (@mmochan) +author: + - Mike Mochan (@mmochan) +notes: + - Support for I(purge_tags) was added in release 4.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 - amazon.aws.tags -notes: - - Support for I(purge_tags) was added in release 4.0.0. -''' - -EXAMPLES = r''' +""" +EXAMPLES = r""" # Complete example to create and delete a network ACL # that allows SSH, HTTP and ICMP in, and all traffic out. - name: "Create and associate production DMZ network ACL with DMZ subnets" @@ -98,16 +96,16 @@ EXAMPLES = r''' Project: phoenix Description: production DMZ ingress: - # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code, - # port from, port to - - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22] - - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80] - - [205, 'tcp', 'allow', '::/0', null, null, 80, 80] - - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8] - - [305, 'ipv6-icmp', 'allow', '::/0', 0, 8] + # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code, + # port from, port to + - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22] + - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80] + - [205, 'tcp', 'allow', '::/0', null, null, 80, 80] + - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8] + - [305, 'ipv6-icmp', 'allow', '::/0', 0, 8] egress: - - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null] - - [105, 'all', 'allow', '::/0', null, null, null, null] + - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null] + - [105, 'all', 'allow', '::/0', null, null, null, null] state: 'present' - name: "Remove the ingress and egress rules - defaults to deny all" @@ -141,8 +139,9 @@ EXAMPLES = r''' community.aws.ec2_vpc_nacl: nacl_id: acl-33b4ee5b state: absent -''' -RETURN = r''' +""" + +RETURN = r""" task: description: The result of the create, or delete action. 
returned: success @@ -152,47 +151,48 @@ nacl_id: returned: success type: str sample: acl-123456789abcdef01 -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + # VPC-supported IANA protocol numbers # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml -PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, 'ipv6-icmp': 58} +PROTOCOL_NUMBERS = {"all": -1, "icmp": 1, "tcp": 6, "udp": 17, "ipv6-icmp": 58} # Utility methods def icmp_present(entry): - if len(entry) == 6 and entry[1] in ['icmp', 'ipv6-icmp'] or entry[1] in [1, 58]: + if len(entry) == 6 and entry[1] in ["icmp", "ipv6-icmp"] or entry[1] in [1, 58]: return True def subnets_removed(nacl_id, subnets, client, module): results = find_acl_by_id(nacl_id, client, module) - associations = results['NetworkAcls'][0]['Associations'] - subnet_ids = [assoc['SubnetId'] for assoc in associations] + associations = results["NetworkAcls"][0]["Associations"] + subnet_ids = [assoc["SubnetId"] for assoc in associations] return [subnet for subnet in subnet_ids if subnet not in subnets] def subnets_added(nacl_id, subnets, client, module): results = find_acl_by_id(nacl_id, client, module) - associations = results['NetworkAcls'][0]['Associations'] - subnet_ids = [assoc['SubnetId'] for assoc in associations] + associations = results["NetworkAcls"][0]["Associations"] + subnet_ids = [assoc["SubnetId"] for assoc in associations] return [subnet for subnet in subnets if subnet not in subnet_ids] def subnets_changed(nacl, client, module): changed = False - vpc_id = module.params.get('vpc_id') - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + vpc_id = module.params.get("vpc_id") + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] subnets = subnets_to_associate(nacl, client, module) if not subnets: default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0] @@ -218,40 +218,41 @@ def subnets_changed(nacl, client, module): def nacls_changed(nacl, client, module): changed = False params = dict() - params['egress'] = module.params.get('egress') - params['ingress'] = module.params.get('ingress') + params["egress"] = module.params.get("egress") + params["ingress"] = module.params.get("ingress") - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] nacl = describe_network_acl(client, module) - entries = nacl['NetworkAcls'][0]['Entries'] - egress = [rule for rule in entries if rule['Egress'] is True and rule['RuleNumber'] < 32767] - ingress = [rule for rule in entries if rule['Egress'] is False and rule['RuleNumber'] < 32767] - if rules_changed(egress, params['egress'], True, nacl_id, client, module): + entries = nacl["NetworkAcls"][0]["Entries"] + egress = [rule for rule in entries if rule["Egress"] is True and rule["RuleNumber"] < 32767] + ingress = [rule for rule in entries if rule["Egress"] is False and rule["RuleNumber"] < 32767] + if rules_changed(egress, params["egress"], True, nacl_id, client, module): 
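# The bare True above is the Egress positional argument of rules_changed()
# (signature: aws_rules, param_rules, Egress, nacl_id, client, module):
# True diffs the egress rule set, False in the matching ingress call below.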
changed = True - if rules_changed(ingress, params['ingress'], False, nacl_id, client, module): + if rules_changed(ingress, params["ingress"], False, nacl_id, client, module): changed = True return changed def tags_changed(nacl_id, client, module): - tags = module.params.get('tags') - name = module.params.get('name') - purge_tags = module.params.get('purge_tags') + tags = module.params.get("tags") + name = module.params.get("name") + purge_tags = module.params.get("purge_tags") if name is None and tags is None: return False - if module.params.get('tags') is None: + if module.params.get("tags") is None: # Only purge tags if tags is explicitly set to {} and purge_tags is True purge_tags = False new_tags = dict() - if module.params.get('name') is not None: - new_tags['Name'] = module.params.get('name') - new_tags.update(module.params.get('tags') or {}) + if module.params.get("name") is not None: + new_tags["Name"] = module.params.get("name") + new_tags.update(module.params.get("tags") or {}) - return ensure_ec2_tags(client, module, nacl_id, tags=new_tags, - purge_tags=purge_tags, retry_codes=['InvalidNetworkAclID.NotFound']) + return ensure_ec2_tags( + client, module, nacl_id, tags=new_tags, purge_tags=purge_tags, retry_codes=["InvalidNetworkAclID.NotFound"] + ) def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module): @@ -266,60 +267,60 @@ def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module): if removed_rules: params = dict() for rule in removed_rules: - params['NetworkAclId'] = nacl_id - params['RuleNumber'] = rule['RuleNumber'] - params['Egress'] = Egress + params["NetworkAclId"] = nacl_id + params["RuleNumber"] = rule["RuleNumber"] + params["Egress"] = Egress delete_network_acl_entry(params, client, module) changed = True added_rules = [x for x in rules if x not in aws_rules] if added_rules: for rule in added_rules: - rule['NetworkAclId'] = nacl_id + rule["NetworkAclId"] = nacl_id create_network_acl_entry(rule, client, module) changed = True return changed def is_ipv6(cidr): - return ':' in cidr + return ":" in cidr def process_rule_entry(entry, Egress): params = dict() - params['RuleNumber'] = entry[0] - params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]]) - params['RuleAction'] = entry[2] - params['Egress'] = Egress + params["RuleNumber"] = entry[0] + params["Protocol"] = str(PROTOCOL_NUMBERS[entry[1]]) + params["RuleAction"] = entry[2] + params["Egress"] = Egress if is_ipv6(entry[3]): - params['Ipv6CidrBlock'] = entry[3] + params["Ipv6CidrBlock"] = entry[3] else: - params['CidrBlock'] = entry[3] + params["CidrBlock"] = entry[3] if icmp_present(entry): - params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])} + params["IcmpTypeCode"] = {"Type": int(entry[4]), "Code": int(entry[5])} else: if entry[6] or entry[7]: - params['PortRange'] = {"From": entry[6], 'To': entry[7]} + params["PortRange"] = {"From": entry[6], "To": entry[7]} return params def restore_default_associations(assoc_ids, default_nacl_id, client, module): if assoc_ids: params = dict() - params['NetworkAclId'] = default_nacl_id[0] + params["NetworkAclId"] = default_nacl_id[0] for assoc_id in assoc_ids: - params['AssociationId'] = assoc_id + params["AssociationId"] = assoc_id restore_default_acl_association(params, client, module) return True def construct_acl_entries(nacl, client, module): - for entry in module.params.get('ingress'): + for entry in module.params.get("ingress"): params = process_rule_entry(entry, Egress=False) - params['NetworkAclId'] = 
nacl['NetworkAcl']['NetworkAclId'] + params["NetworkAclId"] = nacl["NetworkAcl"]["NetworkAclId"] create_network_acl_entry(params, client, module) - for rule in module.params.get('egress'): + for rule in module.params.get("egress"): params = process_rule_entry(rule, Egress=True) - params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId'] + params["NetworkAclId"] = nacl["NetworkAcl"]["NetworkAclId"] create_network_acl_entry(params, client, module) @@ -327,21 +328,21 @@ def construct_acl_entries(nacl, client, module): def setup_network_acl(client, module): changed = False nacl = describe_network_acl(client, module) - if not nacl['NetworkAcls']: + if not nacl["NetworkAcls"]: tags = {} - if module.params.get('name'): - tags['Name'] = module.params.get('name') - tags.update(module.params.get('tags') or {}) - nacl = create_network_acl(module.params.get('vpc_id'), client, module, tags) - nacl_id = nacl['NetworkAcl']['NetworkAclId'] + if module.params.get("name"): + tags["Name"] = module.params.get("name") + tags.update(module.params.get("tags") or {}) + nacl = create_network_acl(module.params.get("vpc_id"), client, module, tags) + nacl_id = nacl["NetworkAcl"]["NetworkAclId"] subnets = subnets_to_associate(nacl, client, module) replace_network_acl_association(nacl_id, subnets, client, module) construct_acl_entries(nacl, client, module) changed = True - return changed, nacl['NetworkAcl']['NetworkAclId'] + return changed, nacl["NetworkAcl"]["NetworkAclId"] else: changed = False - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] changed |= subnets_changed(nacl, client, module) changed |= nacls_changed(nacl, client, module) changed |= tags_changed(nacl_id, client, module) @@ -352,11 +353,11 @@ def remove_network_acl(client, module): changed = False result = dict() nacl = describe_network_acl(client, module) - if nacl['NetworkAcls']: - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] - vpc_id = nacl['NetworkAcls'][0]['VpcId'] - associations = nacl['NetworkAcls'][0]['Associations'] - assoc_ids = [a['NetworkAclAssociationId'] for a in associations] + if nacl["NetworkAcls"]: + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] + vpc_id = nacl["NetworkAcls"][0]["VpcId"] + associations = nacl["NetworkAcls"][0]["Associations"] + assoc_ids = [a["NetworkAclAssociationId"] for a in associations] default_nacl_id = find_default_vpc_nacl(vpc_id, client, module) if not default_nacl_id: result = {vpc_id: "Default NACL ID not found - Check the VPC ID"} @@ -383,7 +384,7 @@ def _create_network_acl(client, *args, **kwargs): def create_network_acl(vpc_id, client, module, tags): params = dict(VpcId=vpc_id) if tags: - params['TagSpecifications'] = boto3_tag_specifications(tags, ['network-acl']) + params["TagSpecifications"] = boto3_tag_specifications(tags, ["network-acl"]) try: if module.check_mode: nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000")) @@ -394,7 +395,7 @@ def create_network_acl(vpc_id, client, module, tags): return nacl -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _create_network_acl_entry(client, *args, **kwargs): return client.create_network_acl_entry(*args, **kwargs) @@ -420,7 +421,7 @@ def delete_network_acl(nacl_id, client, module): module.fail_json_aws(e) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) 
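# The decorator above retries the wrapped boto3 call with jittered exponential
# backoff, additionally treating InvalidNetworkAclID.NotFound as transient because
# a just-created NACL can briefly be invisible to follow-up API calls. A generic,
# self-contained sketch of that retry pattern (standard library only; the names
# here are illustrative, not AWSRetry's actual internals):
import random
import time

def retry_with_jitter(retries=5, base_delay=1.0, retry_on=(RuntimeError,)):
    def decorator(func):
        def wrapper(*args, **kwargs):
            for attempt in range(retries):
                try:
                    return func(*args, **kwargs)
                except retry_on:
                    if attempt == retries - 1:
                        raise
                    # Sleep for a random fraction of an exponentially growing cap.
                    time.sleep(random.uniform(0, base_delay * 2**attempt))
        return wrapper
    return decorator

# Usage mirrors the module's style: retrying = retry_with_jitter()(client_call)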
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _delete_network_acl_entry(client, *args, **kwargs): return client.delete_network_acl_entry(*args, **kwargs) @@ -438,7 +439,7 @@ def _describe_network_acls(client, **kwargs): return client.describe_network_acls(**kwargs) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _describe_network_acls_retry_missing(client, **kwargs): return client.describe_network_acls(**kwargs) @@ -447,25 +448,23 @@ def describe_acl_associations(subnets, client, module): if not subnets: return [] try: - results = _describe_network_acls_retry_missing(client, Filters=[ - {'Name': 'association.subnet-id', 'Values': subnets} - ]) + results = _describe_network_acls_retry_missing( + client, Filters=[{"Name": "association.subnet-id", "Values": subnets}] + ) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - associations = results['NetworkAcls'][0]['Associations'] - return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets] + associations = results["NetworkAcls"][0]["Associations"] + return [a["NetworkAclAssociationId"] for a in associations if a["SubnetId"] in subnets] def describe_network_acl(client, module): try: - if module.params.get('nacl_id'): - nacl = _describe_network_acls(client, Filters=[ - {'Name': 'network-acl-id', 'Values': [module.params.get('nacl_id')]} - ]) + if module.params.get("nacl_id"): + nacl = _describe_network_acls( + client, Filters=[{"Name": "network-acl-id", "Values": [module.params.get("nacl_id")]}] + ) else: - nacl = _describe_network_acls(client, Filters=[ - {'Name': 'tag:Name', 'Values': [module.params.get('name')]} - ]) + nacl = _describe_network_acls(client, Filters=[{"Name": "tag:Name", "Values": [module.params.get("name")]}]) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) return nacl @@ -480,38 +479,37 @@ def find_acl_by_id(nacl_id, client, module): def find_default_vpc_nacl(vpc_id, client, module): try: - response = _describe_network_acls_retry_missing(client, Filters=[ - {'Name': 'vpc-id', 'Values': [vpc_id]}]) + response = _describe_network_acls_retry_missing(client, Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - nacls = response['NetworkAcls'] - return [n['NetworkAclId'] for n in nacls if n['IsDefault'] is True] + nacls = response["NetworkAcls"] + return [n["NetworkAclId"] for n in nacls if n["IsDefault"] is True] def find_subnet_ids_by_nacl_id(nacl_id, client, module): try: - results = _describe_network_acls_retry_missing(client, Filters=[ - {'Name': 'association.network-acl-id', 'Values': [nacl_id]} - ]) + results = _describe_network_acls_retry_missing( + client, Filters=[{"Name": "association.network-acl-id", "Values": [nacl_id]}] + ) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - if results['NetworkAcls']: - associations = results['NetworkAcls'][0]['Associations'] - return [s['SubnetId'] for s in associations if s['SubnetId']] + if results["NetworkAcls"]: + associations = results["NetworkAcls"][0]["Associations"] + return [s["SubnetId"] for s in associations if s["SubnetId"]] else: return [] -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def 
_replace_network_acl_association(client, *args, **kwargs): return client.replace_network_acl_association(*args, **kwargs) def replace_network_acl_association(nacl_id, subnets, client, module): params = dict() - params['NetworkAclId'] = nacl_id + params["NetworkAclId"] = nacl_id for association in describe_acl_associations(subnets, client, module): - params['AssociationId'] = association + params["AssociationId"] = association try: if not module.check_mode: _replace_network_acl_association(client, **params) @@ -519,7 +517,7 @@ def replace_network_acl_association(nacl_id, subnets, client, module): module.fail_json_aws(e) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _replace_network_acl_entry(client, *args, **kwargs): return client.replace_network_acl_entry(*args, **kwargs) @@ -527,7 +525,7 @@ def _replace_network_acl_entry(client, *args, **kwargs): def replace_network_acl_entry(entries, Egress, nacl_id, client, module): for entry in entries: params = entry - params['NetworkAclId'] = nacl_id + params["NetworkAclId"] = nacl_id try: if not module.check_mode: _replace_network_acl_entry(client, **params) @@ -535,7 +533,7 @@ def replace_network_acl_entry(entries, Egress, nacl_id, client, module): module.fail_json_aws(e) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _replace_network_acl_association(client, *args, **kwargs): return client.replace_network_acl_association(*args, **kwargs) @@ -554,25 +552,23 @@ def _describe_subnets(client, *args, **kwargs): def subnets_to_associate(nacl, client, module): - params = list(module.params.get('subnets')) + params = list(module.params.get("subnets")) if not params: return [] all_found = [] if any(x.startswith("subnet-") for x in params): try: - subnets = _describe_subnets(client, Filters=[ - {'Name': 'subnet-id', 'Values': params}]) - all_found.extend(subnets.get('Subnets', [])) + subnets = _describe_subnets(client, Filters=[{"Name": "subnet-id", "Values": params}]) + all_found.extend(subnets.get("Subnets", [])) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) if len(params) != len(all_found): try: - subnets = _describe_subnets(client, Filters=[ - {'Name': 'tag:Name', 'Values': params}]) - all_found.extend(subnets.get('Subnets', [])) + subnets = _describe_subnets(client, Filters=[{"Name": "tag:Name", "Values": params}]) + all_found.extend(subnets.get("Subnets", [])) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - return list(set(s['SubnetId'] for s in all_found if s.get('SubnetId'))) + return list(set(s["SubnetId"] for s in all_found if s.get("SubnetId"))) def main(): @@ -580,29 +576,31 @@ def main(): vpc_id=dict(), name=dict(), nacl_id=dict(), - subnets=dict(required=False, type='list', default=list(), elements='str'), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(required=False, type='bool', default=True), - ingress=dict(required=False, type='list', default=list(), elements='list'), - egress=dict(required=False, type='list', default=list(), elements='list'), - state=dict(default='present', choices=['present', 'absent']), + subnets=dict(required=False, type="list", default=list(), elements="str"), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(required=False, type="bool", 
default=True), + ingress=dict(required=False, type="list", default=list(), elements="list"), + egress=dict(required=False, type="list", default=list(), elements="list"), + state=dict(default="present", choices=["present", "absent"]), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[["name", "nacl_id"]], + required_if=[["state", "present", ["vpc_id"]]], ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[['name', 'nacl_id']], - required_if=[['state', 'present', ['vpc_id']]]) - state = module.params.get('state').lower() + state = module.params.get("state").lower() - client = module.client('ec2') + client = module.client("ec2") invocations = { "present": setup_network_acl, - "absent": remove_network_acl + "absent": remove_network_acl, } (changed, results) = invocations[state](client, module) module.exit_json(changed=changed, nacl_id=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py index b85c94236..d95508a89 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py @@ -1,18 +1,18 @@ #!/usr/bin/python -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_nacl_info version_added: 1.0.0 short_description: Gather information about Network ACLs in an AWS VPC description: - - Gather information about Network ACLs in an AWS VPC -author: "Brad Davidson (@brandond)" + - Gather information about Network ACLs in an AWS VPC +author: + - "Brad Davidson (@brandond)" options: nacl_ids: description: @@ -34,12 +34,12 @@ notes: - By default, the module will return all Network ACLs. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all Network ACLs: @@ -55,9 +55,9 @@ EXAMPLES = r''' filters: 'default': 'true' register: default_nacls -''' +""" -RETURN = r''' +RETURN = r""" nacls: description: Returns an array of complex objects as described below. 
returned: success @@ -100,7 +100,7 @@ nacls: type: list elements: list sample: [[100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]] -''' +""" try: import botocore @@ -109,20 +109,19 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule # VPC-supported IANA protocol numbers # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml -PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'} +PROTOCOL_NAMES = {"-1": "all", "1": "icmp", "6": "tcp", "17": "udp"} def list_ec2_vpc_nacls(connection, module): - nacl_ids = module.params.get("nacl_ids") filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) @@ -131,86 +130,97 @@ def list_ec2_vpc_nacls(connection, module): try: nacls = connection.describe_network_acls(aws_retry=True, NetworkAclIds=nacl_ids, Filters=filters) - except is_boto3_error_code('InvalidNetworkAclID.NotFound'): - module.fail_json(msg='Unable to describe ACL. NetworkAcl does not exist') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids)) + except is_boto3_error_code("InvalidNetworkAclID.NotFound"): + module.fail_json(msg="Unable to describe ACL. 
NetworkAcl does not exist") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Unable to describe network ACLs {nacl_ids}") # Turn the boto3 result in to ansible_friendly_snaked_names snaked_nacls = [] - for nacl in nacls['NetworkAcls']: + for nacl in nacls["NetworkAcls"]: snaked_nacls.append(camel_dict_to_snake_dict(nacl)) # Turn the boto3 result in to ansible friendly tag dictionary for nacl in snaked_nacls: - if 'tags' in nacl: - nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'], 'key', 'value') - if 'entries' in nacl: - nacl['egress'] = [nacl_entry_to_list(entry) for entry in nacl['entries'] - if entry['rule_number'] < 32767 and entry['egress']] - nacl['ingress'] = [nacl_entry_to_list(entry) for entry in nacl['entries'] - if entry['rule_number'] < 32767 and not entry['egress']] - del nacl['entries'] - if 'associations' in nacl: - nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']] - del nacl['associations'] - if 'network_acl_id' in nacl: - nacl['nacl_id'] = nacl['network_acl_id'] - del nacl['network_acl_id'] + if "tags" in nacl: + nacl["tags"] = boto3_tag_list_to_ansible_dict(nacl["tags"], "key", "value") + if "entries" in nacl: + nacl["egress"] = [ + nacl_entry_to_list(entry) + for entry in nacl["entries"] + if entry["rule_number"] < 32767 and entry["egress"] + ] + nacl["ingress"] = [ + nacl_entry_to_list(entry) + for entry in nacl["entries"] + if entry["rule_number"] < 32767 and not entry["egress"] + ] + del nacl["entries"] + if "associations" in nacl: + nacl["subnets"] = [a["subnet_id"] for a in nacl["associations"]] + del nacl["associations"] + if "network_acl_id" in nacl: + nacl["nacl_id"] = nacl["network_acl_id"] + del nacl["network_acl_id"] module.exit_json(nacls=snaked_nacls) def nacl_entry_to_list(entry): - # entry list format # [ rule_num, protocol name or number, allow or deny, ipv4/6 cidr, icmp type, icmp code, port from, port to] elist = [] - elist.append(entry['rule_number']) + elist.append(entry["rule_number"]) - if entry.get('protocol') in PROTOCOL_NAMES: - elist.append(PROTOCOL_NAMES[entry['protocol']]) + if entry.get("protocol") in PROTOCOL_NAMES: + elist.append(PROTOCOL_NAMES[entry["protocol"]]) else: - elist.append(entry.get('protocol')) + elist.append(entry.get("protocol")) - elist.append(entry['rule_action']) + elist.append(entry["rule_action"]) - if entry.get('cidr_block'): - elist.append(entry['cidr_block']) - elif entry.get('ipv6_cidr_block'): - elist.append(entry['ipv6_cidr_block']) + if entry.get("cidr_block"): + elist.append(entry["cidr_block"]) + elif entry.get("ipv6_cidr_block"): + elist.append(entry["ipv6_cidr_block"]) else: elist.append(None) elist = elist + [None, None, None, None] - if entry['protocol'] in ('1', '58'): - elist[4] = entry.get('icmp_type_code', {}).get('type') - elist[5] = entry.get('icmp_type_code', {}).get('code') + if entry["protocol"] in ("1", "58"): + elist[4] = entry.get("icmp_type_code", {}).get("type") + elist[5] = entry.get("icmp_type_code", {}).get("code") - if entry['protocol'] not in ('1', '6', '17', '58'): + if entry["protocol"] not in ("1", "6", "17", "58"): elist[6] = 0 elist[7] = 65535 - elif 'port_range' in entry: - elist[6] = entry['port_range']['from'] - elist[7] = entry['port_range']['to'] + elif "port_range" in entry: + elist[6] = entry["port_range"]["from"] + elist[7] = entry["port_range"]["to"] return elist def main(): - argument_spec = dict( - nacl_ids=dict(default=[], type='list', 
aliases=['nacl_id'], elements='str'), - filters=dict(default={}, type='dict')) + nacl_ids=dict(default=[], type="list", aliases=["nacl_id"], elements="str"), + filters=dict(default={}, type="dict"), + ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) list_ec2_vpc_nacls(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py index f23ffae19..2a731bf23 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: ec2_vpc_peer short_description: create, delete, accept, and reject VPC peering connections between two VPCs. version_added: 1.0.0 @@ -57,13 +55,13 @@ notes: author: - Mike Mochan (@mmochan) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Complete example to create and accept a local peering connection. - name: Create local account VPC peering Connection community.aws.ec2_vpc_peer: @@ -211,9 +209,9 @@ EXAMPLES = ''' peering_id: "{{ vpc_peer.peering_id }}" profile: bot03_profile_for_cross_account state: reject +""" -''' -RETURN = ''' +RETURN = r""" peering_id: description: The id of the VPC peering connection created/deleted. 
returned: always @@ -352,33 +350,33 @@ vpc_peering_connection: returned: success type: str example: "pcx-0123456789abcdef0" -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import add_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def wait_for_state(client, module, state, pcx_id): - waiter = client.get_waiter('vpc_peering_connection_exists') + waiter = client.get_waiter("vpc_peering_connection_exists") peer_filter = { - 'vpc-peering-connection-id': pcx_id, - 'status-code': state, + "vpc-peering-connection-id": pcx_id, + "status-code": state, } try: - waiter.wait( - Filters=ansible_dict_to_boto3_filter_list(peer_filter) - ) + waiter.wait(Filters=ansible_dict_to_boto3_filter_list(peer_filter)) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, "Failed to wait for state change") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -387,18 +385,18 @@ def wait_for_state(client, module, state, pcx_id): def describe_peering_connections(params, client): peer_filter = { - 'requester-vpc-info.vpc-id': params['VpcId'], - 'accepter-vpc-info.vpc-id': params['PeerVpcId'], + "requester-vpc-info.vpc-id": params["VpcId"], + "accepter-vpc-info.vpc-id": params["PeerVpcId"], } result = client.describe_vpc_peering_connections( aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(peer_filter), ) - if result['VpcPeeringConnections'] == []: + if result["VpcPeeringConnections"] == []: # Try again with the VPC/Peer relationship reversed peer_filter = { - 'requester-vpc-info.vpc-id': params['PeerVpcId'], - 'accepter-vpc-info.vpc-id': params['VpcId'], + "requester-vpc-info.vpc-id": params["PeerVpcId"], + "accepter-vpc-info.vpc-id": params["VpcId"], } result = client.describe_vpc_peering_connections( aws_retry=True, @@ -409,29 +407,32 @@ def describe_peering_connections(params, client): def is_active(peering_conn): - return peering_conn['Status']['Code'] == 'active' + return peering_conn["Status"]["Code"] == "active" def is_pending(peering_conn): - return peering_conn['Status']['Code'] == 'pending-acceptance' + return peering_conn["Status"]["Code"] == "pending-acceptance" def create_peer_connection(client, module): changed = False params = dict() - params['VpcId'] = 
module.params.get('vpc_id') - params['PeerVpcId'] = module.params.get('peer_vpc_id') - if module.params.get('peer_region'): - params['PeerRegion'] = module.params.get('peer_region') - if module.params.get('peer_owner_id'): - params['PeerOwnerId'] = str(module.params.get('peer_owner_id')) + params["VpcId"] = module.params.get("vpc_id") + params["PeerVpcId"] = module.params.get("peer_vpc_id") + if module.params.get("peer_region"): + params["PeerRegion"] = module.params.get("peer_region") + if module.params.get("peer_owner_id"): + params["PeerOwnerId"] = str(module.params.get("peer_owner_id")) peering_conns = describe_peering_connections(params, client) - for peering_conn in peering_conns['VpcPeeringConnections']: - pcx_id = peering_conn['VpcPeeringConnectionId'] - if ensure_ec2_tags(client, module, pcx_id, - purge_tags=module.params.get('purge_tags'), - tags=module.params.get('tags'), - ): + for peering_conn in peering_conns["VpcPeeringConnections"]: + pcx_id = peering_conn["VpcPeeringConnectionId"] + if ensure_ec2_tags( + client, + module, + pcx_id, + purge_tags=module.params.get("purge_tags"), + tags=module.params.get("tags"), + ): changed = True if is_active(peering_conn): return (changed, peering_conn) @@ -439,54 +440,59 @@ def create_peer_connection(client, module): return (changed, peering_conn) try: peering_conn = client.create_vpc_peering_connection(aws_retry=True, **params) - pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'] - if module.params.get('tags'): + pcx_id = peering_conn["VpcPeeringConnection"]["VpcPeeringConnectionId"] + if module.params.get("tags"): # Once the minimum botocore version is bumped to > 1.17.24 # (hopefully community.aws 3.0.0) we can add the tags to the # creation parameters - add_ec2_tags(client, module, pcx_id, module.params.get('tags'), - retry_codes=['InvalidVpcPeeringConnectionID.NotFound']) - if module.params.get('wait'): - wait_for_state(client, module, 'pending-acceptance', pcx_id) + add_ec2_tags( + client, + module, + pcx_id, + module.params.get("tags"), + retry_codes=["InvalidVpcPeeringConnectionID.NotFound"], + ) + if module.params.get("wait"): + wait_for_state(client, module, "pending-acceptance", pcx_id) changed = True - return (changed, peering_conn['VpcPeeringConnection']) + return (changed, peering_conn["VpcPeeringConnection"]) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) def remove_peer_connection(client, module): - pcx_id = module.params.get('peering_id') + pcx_id = module.params.get("peering_id") if pcx_id: peering_conn = get_peering_connection_by_id(pcx_id, client, module) else: params = dict() - params['VpcId'] = module.params.get('vpc_id') - params['PeerVpcId'] = module.params.get('peer_vpc_id') - params['PeerRegion'] = module.params.get('peer_region') - if module.params.get('peer_owner_id'): - params['PeerOwnerId'] = str(module.params.get('peer_owner_id')) - peering_conn = describe_peering_connections(params, client)['VpcPeeringConnections'][0] + params["VpcId"] = module.params.get("vpc_id") + params["PeerVpcId"] = module.params.get("peer_vpc_id") + params["PeerRegion"] = module.params.get("peer_region") + if module.params.get("peer_owner_id"): + params["PeerOwnerId"] = str(module.params.get("peer_owner_id")) + peering_conn = describe_peering_connections(params, client)["VpcPeeringConnections"][0] if not peering_conn: module.exit_json(changed=False) else: - pcx_id = pcx_id or peering_conn['VpcPeeringConnectionId'] + pcx_id = pcx_id or peering_conn["VpcPeeringConnectionId"] - if 
peering_conn['Status']['Code'] == 'deleted': - module.exit_json(msg='Connection in deleted state.', changed=False, peering_id=pcx_id) - if peering_conn['Status']['Code'] == 'rejected': + if peering_conn["Status"]["Code"] == "deleted": + module.exit_json(msg="Connection in deleted state.", changed=False, peering_id=pcx_id) + if peering_conn["Status"]["Code"] == "rejected": module.exit_json( - msg='Connection has been rejected. State cannot be changed and will be removed automatically by AWS', + msg="Connection has been rejected. State cannot be changed and will be removed automatically by AWS", changed=False, - peering_id=pcx_id + peering_id=pcx_id, ) try: params = dict() - params['VpcPeeringConnectionId'] = pcx_id + params["VpcPeeringConnectionId"] = pcx_id client.delete_vpc_peering_connection(aws_retry=True, **params) - if module.params.get('wait'): - wait_for_state(client, module, 'deleted', pcx_id) + if module.params.get("wait"): + wait_for_state(client, module, "deleted", pcx_id) module.exit_json(changed=True, peering_id=pcx_id) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) @@ -494,44 +500,55 @@ def remove_peer_connection(client, module): def get_peering_connection_by_id(peering_id, client, module): params = dict() - params['VpcPeeringConnectionIds'] = [peering_id] + params["VpcPeeringConnectionIds"] = [peering_id] try: vpc_peering_connection = client.describe_vpc_peering_connections(aws_retry=True, **params) - return vpc_peering_connection['VpcPeeringConnections'][0] - except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e: - module.fail_json_aws(e, msg='Malformed connection ID') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Error while describing peering connection by peering_id') + return vpc_peering_connection["VpcPeeringConnections"][0] + except is_boto3_error_code("InvalidVpcPeeringConnectionId.Malformed") as e: + module.fail_json_aws(e, msg="Malformed connection ID") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Error while describing peering connection by peering_id") def accept_reject(state, client, module): changed = False params = dict() - peering_id = module.params.get('peering_id') - params['VpcPeeringConnectionId'] = peering_id + peering_id = module.params.get("peering_id") + params["VpcPeeringConnectionId"] = peering_id vpc_peering_connection = get_peering_connection_by_id(peering_id, client, module) - peering_status = vpc_peering_connection['Status']['Code'] + peering_status = vpc_peering_connection["Status"]["Code"] - if peering_status not in ['active', 'rejected']: + if peering_status not in ["active", "rejected"]: try: - if state == 'accept': + if state == "accept": client.accept_vpc_peering_connection(aws_retry=True, **params) - target_state = 'active' + target_state = "active" else: client.reject_vpc_peering_connection(aws_retry=True, **params) - target_state = 'rejected' - if module.params.get('tags'): - add_ec2_tags(client, module, peering_id, module.params.get('tags'), - retry_codes=['InvalidVpcPeeringConnectionID.NotFound']) + target_state = "rejected" + if module.params.get("tags"): + add_ec2_tags( + client, + module, + peering_id, + module.params.get("tags"), + retry_codes=["InvalidVpcPeeringConnectionID.NotFound"], + ) changed = True - if module.params.get('wait'): + if module.params.get("wait"): 
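# wait_for_state, defined near the top of this module, drives the boto3
# "vpc_peering_connection_exists" waiter with a status-code filter. Stripped of
# boto3, the equivalent polling loop looks roughly like this; poll_fn, target,
# and the 15s/320s defaults are hypothetical placeholders:
import time

def poll_until(poll_fn, target, interval=15, timeout=320):
    """Call poll_fn() until it returns target or the timeout elapses."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if poll_fn() == target:
            return True
        time.sleep(interval)
    return False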
wait_for_state(client, module, target_state, peering_id)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
- if ensure_ec2_tags(client, module, peering_id,
- purge_tags=module.params.get('purge_tags'),
- tags=module.params.get('tags'),
- ):
+ if ensure_ec2_tags(
+ client,
+ module,
+ peering_id,
+ purge_tags=module.params.get("purge_tags"),
+ tags=module.params.get("tags"),
+ ):
changed = True

# Reload peering connection info to return the latest state/params
@@ -546,34 +563,36 @@ def main():
peer_region=dict(),
peering_id=dict(),
peer_owner_id=dict(),
- tags=dict(required=False, type='dict', aliases=['resource_tags']),
- purge_tags=dict(default=True, type='bool'),
- state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']),
- wait=dict(default=False, type='bool'),
+ tags=dict(required=False, type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(default=True, type="bool"),
+ state=dict(default="present", choices=["present", "absent", "accept", "reject"]),
+ wait=dict(default=False, type="bool"),
)
required_if = [
- ('state', 'present', ['vpc_id', 'peer_vpc_id']),
- ('state', 'accept', ['peering_id']),
- ('state', 'reject', ['peering_id'])
+ ("state", "present", ["vpc_id", "peer_vpc_id"]),
+ ("state", "accept", ["peering_id"]),
+ ("state", "reject", ["peering_id"]),
]
module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if)

- state = module.params.get('state')
- peering_id = module.params.get('peering_id')
- vpc_id = module.params.get('vpc_id')
- peer_vpc_id = module.params.get('peer_vpc_id')
+ state = module.params.get("state")
+ peering_id = module.params.get("peering_id")
+ vpc_id = module.params.get("vpc_id")
+ peer_vpc_id = module.params.get("peer_vpc_id")

try:
- client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")

- if state == 'present':
+ if state == "present":
(changed, results) = create_peer_connection(client, module)
- elif state == 'absent':
+ elif state == "absent":
if not peering_id and (not vpc_id or not peer_vpc_id):
- module.fail_json(msg='state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]')
+ module.fail_json(
+ msg="state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]"
+ )

remove_peer_connection(client, module)
else:
@@ -581,10 +600,12 @@ def main():
formatted_results = camel_dict_to_snake_dict(results)
# Turn the resource tags from boto3 into an ansible friendly tag dictionary
- formatted_results['tags'] = boto3_tag_list_to_ansible_dict(formatted_results.get('tags', []))
+ formatted_results["tags"] = boto3_tag_list_to_ansible_dict(formatted_results.get("tags", []))

- module.exit_json(changed=changed, vpc_peering_connection=formatted_results, peering_id=results['VpcPeeringConnectionId'])
+ module.exit_json(
+ changed=changed, vpc_peering_connection=formatted_results, peering_id=results["VpcPeeringConnectionId"]
+ )

-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py
index 680fa3b68..badc9f8fd 100644
--- 
a/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_vpc_peering_info short_description: Retrieves AWS VPC Peering details using AWS methods. version_added: 1.0.0 @@ -25,15 +23,15 @@ options: for possible filters. type: dict default: {} -author: Karen Cheng (@Etherdaemon) +author: + - Karen Cheng (@Etherdaemon) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Simple example of listing all VPC Peers - name: List all vpc peers community.aws.ec2_vpc_peering_info: @@ -58,9 +56,9 @@ EXAMPLES = r''' filters: status-code: ['pending-acceptance'] register: pending_vpc_peers -''' +""" -RETURN = r''' +RETURN = r""" vpc_peering_connections: description: Details of the matching VPC peering connections. returned: success @@ -199,58 +197,62 @@ result: description: The result of the describe. returned: success type: list -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_vpc_peers(client, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - if module.params.get('peer_connection_ids'): - params['VpcPeeringConnectionIds'] = module.params.get('peer_connection_ids') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + if module.params.get("peer_connection_ids"): + params["VpcPeeringConnectionIds"] = module.params.get("peer_connection_ids") try: result = client.describe_vpc_peering_connections(aws_retry=True, **params) result = normalize_boto3_result(result) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe peering connections") - return result['VpcPeeringConnections'] + return result["VpcPeeringConnections"] def main(): argument_spec = dict( - 
filters=dict(default=dict(), type='dict'), - peer_connection_ids=dict(default=None, type='list', elements='str'), + filters=dict(default=dict(), type="dict"), + peer_connection_ids=dict(default=None, type="list", elements="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True,) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) try: - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") # Turn the boto3 result in to ansible friendly_snaked_names results = [camel_dict_to_snake_dict(peer) for peer in get_vpc_peers(ec2, module)] # Turn the boto3 result in to ansible friendly tag dictionary for peer in results: - peer['tags'] = boto3_tag_list_to_ansible_dict(peer.get('tags', [])) + peer["tags"] = boto3_tag_list_to_ansible_dict(peer.get("tags", [])) module.exit_json(result=results, vpc_peering_connections=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py index 8332e1006..135658f76 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: ec2_vpc_vgw short_description: Create and delete AWS VPN Virtual Gateways version_added: 1.0.0 @@ -55,13 +53,13 @@ notes: author: - Nick Aslanidis (@naslanidis) extends_documentation_fragment: - - amazon.aws.ec2 - - amazon.aws.aws - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new VGW attached to a specific VPC community.aws.ec2_vpc_vgw: state: present @@ -100,9 +98,9 @@ EXAMPLES = ''' profile: personal vpn_gateway_id: vgw-3a9aa123 register: deleted_vgw -''' +""" -RETURN = ''' +RETURN = r""" vgw: description: A description of the VGW returned: success @@ -133,7 +131,7 @@ vgw: type: str returned: success example: vpc-123456789abcdef01 -''' +""" import time @@ -142,13 +140,14 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import 
boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule # AWS uses VpnGatewayLimitExceeded for both 'Too many VGWs' and 'Too many concurrent changes' @@ -156,11 +155,14 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_ta class VGWRetry(AWSRetry): @staticmethod def status_code_from_exception(error): - return (error.response['Error']['Code'], error.response['Error']['Message'],) + return ( + error.response["Error"]["Code"], + error.response["Error"]["Message"], + ) @staticmethod def found(response_code, catch_extra_error_codes=None): - retry_on = ['The maximum number of mutating objects has been reached.'] + retry_on = ["The maximum number of mutating objects has been reached."] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) @@ -180,37 +182,37 @@ def get_vgw_info(vgws): for vgw in vgws: vgw_info = { - 'id': vgw['VpnGatewayId'], - 'type': vgw['Type'], - 'state': vgw['State'], - 'vpc_id': None, - 'tags': dict() + "id": vgw["VpnGatewayId"], + "type": vgw["Type"], + "state": vgw["State"], + "vpc_id": None, + "tags": dict(), } - if vgw['Tags']: - vgw_info['tags'] = boto3_tag_list_to_ansible_dict(vgw['Tags']) + if vgw["Tags"]: + vgw_info["tags"] = boto3_tag_list_to_ansible_dict(vgw["Tags"]) - if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached': - vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId'] + if len(vgw["VpcAttachments"]) != 0 and vgw["VpcAttachments"][0]["State"] == "attached": + vgw_info["vpc_id"] = vgw["VpcAttachments"][0]["VpcId"] return vgw_info def wait_for_status(client, module, vpn_gateway_id, status): polling_increment_secs = 15 - max_retries = (module.params.get('wait_timeout') // polling_increment_secs) + max_retries = module.params.get("wait_timeout") // polling_increment_secs status_achieved = False for x in range(0, max_retries): try: response = find_vgw(client, module, vpn_gateway_id) - if response[0]['VpcAttachments'][0]['State'] == status: + if response[0]["VpcAttachments"][0]["State"] == status: status_achieved = True break else: time.sleep(polling_increment_secs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failure while waiting for status update') + module.fail_json_aws(e, msg="Failure while waiting for status update") result = response return status_achieved, result @@ -218,22 +220,21 @@ def wait_for_status(client, module, vpn_gateway_id, status): def attach_vgw(client, module, vpn_gateway_id): params = dict() - params['VpcId'] = module.params.get('vpc_id') + params["VpcId"] = module.params.get("vpc_id") try: # Immediately after a detachment, the EC2 API sometimes will report the VpnGateways[0].State # as available several seconds before actually permitting a new attachment. # So we catch and retry that error. 
See https://github.com/ansible/ansible/issues/53185 - response = VGWRetry.jittered_backoff(retries=5, - catch_extra_error_codes=['InvalidParameterValue'] - )(client.attach_vpn_gateway)(VpnGatewayId=vpn_gateway_id, - VpcId=params['VpcId']) + response = VGWRetry.jittered_backoff(retries=5, catch_extra_error_codes=["InvalidParameterValue"])( + client.attach_vpn_gateway + )(VpnGatewayId=vpn_gateway_id, VpcId=params["VpcId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to attach VPC') + module.fail_json_aws(e, msg="Failed to attach VPC") - status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached') + status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], "attached") if not status_achieved: - module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console') + module.fail_json(msg="Error waiting for vpc to attach to vgw - please check the AWS console") result = response return result @@ -241,19 +242,19 @@ def attach_vgw(client, module, vpn_gateway_id): def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): params = dict() - params['VpcId'] = module.params.get('vpc_id') + params["VpcId"] = module.params.get("vpc_id") try: if vpc_id: response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id, aws_retry=True) else: - response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'], aws_retry=True) + response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params["VpcId"], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, 'Failed to detach gateway') + module.fail_json_aws(e, "Failed to detach gateway") - status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached') + status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], "detached") if not status_achieved: - module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console') + module.fail_json(msg="Error waiting for vpc to detach from vgw - please check the AWS console") result = response return result @@ -261,37 +262,37 @@ def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): def create_vgw(client, module): params = dict() - params['Type'] = module.params.get('type') - tags = module.params.get('tags') or {} - tags['Name'] = module.params.get('name') - params['TagSpecifications'] = boto3_tag_specifications(tags, ['vpn-gateway']) - if module.params.get('asn'): - params['AmazonSideAsn'] = module.params.get('asn') + params["Type"] = module.params.get("type") + tags = module.params.get("tags") or {} + tags["Name"] = module.params.get("name") + params["TagSpecifications"] = boto3_tag_specifications(tags, ["vpn-gateway"]) + if module.params.get("asn"): + params["AmazonSideAsn"] = module.params.get("asn") try: response = client.create_vpn_gateway(aws_retry=True, **params) - get_waiter( - client, 'vpn_gateway_exists' - ).wait( - VpnGatewayIds=[response['VpnGateway']['VpnGatewayId']] - ) + get_waiter(client, "vpn_gateway_exists").wait(VpnGatewayIds=[response["VpnGateway"]["VpnGatewayId"]]) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId'])) - except is_boto3_error_code('VpnGatewayLimitExceeded') as e: + module.fail_json_aws( + e, msg=f"Failed to wait for Vpn Gateway 
{response['VpnGateway']['VpnGatewayId']} to be available" + ) + except is_boto3_error_code("VpnGatewayLimitExceeded") as e: module.fail_json_aws(e, msg="Too many VPN gateways exist in this account.") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to create gateway') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to create gateway") result = response return result def delete_vgw(client, module, vpn_gateway_id): - try: response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to delete gateway') + module.fail_json_aws(e, msg="Failed to delete gateway") # return the deleted VpnGatewayId as this is not included in the above response result = vpn_gateway_id @@ -300,13 +301,13 @@ def delete_vgw(client, module, vpn_gateway_id): def find_vpc(client, module): params = dict() - params['vpc_id'] = module.params.get('vpc_id') + params["vpc_id"] = module.params.get("vpc_id") - if params['vpc_id']: + if params["vpc_id"]: try: - response = client.describe_vpcs(VpcIds=[params['vpc_id']], aws_retry=True) + response = client.describe_vpcs(VpcIds=[params["vpc_id"]], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe VPC') + module.fail_json_aws(e, msg="Failed to describe VPC") result = response return result @@ -315,66 +316,68 @@ def find_vpc(client, module): def find_vgw(client, module, vpn_gateway_id=None): params = dict() if vpn_gateway_id: - params['VpnGatewayIds'] = vpn_gateway_id + params["VpnGatewayIds"] = vpn_gateway_id else: - params['Filters'] = [ - {'Name': 'type', 'Values': [module.params.get('type')]}, - {'Name': 'tag:Name', 'Values': [module.params.get('name')]}, + params["Filters"] = [ + {"Name": "type", "Values": [module.params.get("type")]}, + {"Name": "tag:Name", "Values": [module.params.get("name")]}, ] - if module.params.get('state') == 'present': - params['Filters'].append({'Name': 'state', 'Values': ['pending', 'available']}) + if module.params.get("state") == "present": + params["Filters"].append({"Name": "state", "Values": ["pending", "available"]}) try: response = client.describe_vpn_gateways(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe gateway using filters') + module.fail_json_aws(e, msg="Failed to describe gateway using filters") - return sorted(response['VpnGateways'], key=lambda k: k['VpnGatewayId']) + return sorted(response["VpnGateways"], key=lambda k: k["VpnGatewayId"]) def ensure_vgw_present(client, module): - # If an existing vgw name and type matches our args, then a match is considered to have been # found and we will not create another vgw. 
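# ensure_vgw_present follows the usual find-or-create idempotency pattern: look
# the gateway up by name/type, converge tags and attachments if it exists, create
# it otherwise, and report whether anything changed. A minimal skeleton of that
# pattern, with find/converge/create standing in for this module's helpers:
def _sketch_ensure_present(find, converge, create):
    existing = find()
    if existing:
        return converge(existing), existing  # converge() -> bool "changed"
    return True, create()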
changed = False params = dict() result = dict() - params['Name'] = module.params.get('name') - params['VpcId'] = module.params.get('vpc_id') - params['Type'] = module.params.get('type') - params['Tags'] = module.params.get('tags') - params['VpnGatewayIds'] = module.params.get('vpn_gateway_id') + params["Name"] = module.params.get("name") + params["VpcId"] = module.params.get("vpc_id") + params["Type"] = module.params.get("type") + params["Tags"] = module.params.get("tags") + params["VpnGatewayIds"] = module.params.get("vpn_gateway_id") # check that the vpc_id exists. If not, an exception is thrown - if params['VpcId']: + if params["VpcId"]: vpc = find_vpc(client, module) # check if a gateway matching our module args already exists existing_vgw = find_vgw(client, module) if existing_vgw != []: - vpn_gateway_id = existing_vgw[0]['VpnGatewayId'] - desired_tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') + vpn_gateway_id = existing_vgw[0]["VpnGatewayId"] + desired_tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") if desired_tags is None: desired_tags = dict() purge_tags = False - tags = dict(Name=module.params.get('name')) + tags = dict(Name=module.params.get("name")) tags.update(desired_tags) - changed = ensure_ec2_tags(client, module, vpn_gateway_id, resource_type='vpn-gateway', - tags=tags, purge_tags=purge_tags) + changed = ensure_ec2_tags( + client, module, vpn_gateway_id, resource_type="vpn-gateway", tags=tags, purge_tags=purge_tags + ) # if a vpc_id was provided, check if it exists and if it's attached - if params['VpcId']: - - current_vpc_attachments = existing_vgw[0]['VpcAttachments'] - - if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached': - if current_vpc_attachments[0]['VpcId'] != params['VpcId'] or current_vpc_attachments[0]['State'] != 'attached': + if params["VpcId"]: + current_vpc_attachments = existing_vgw[0]["VpcAttachments"] + + if current_vpc_attachments != [] and current_vpc_attachments[0]["State"] == "attached": + if ( + current_vpc_attachments[0]["VpcId"] != params["VpcId"] + or current_vpc_attachments[0]["State"] != "attached" + ): # detach the existing vpc from the virtual gateway - vpc_to_detach = current_vpc_attachments[0]['VpcId'] + vpc_to_detach = current_vpc_attachments[0]["VpcId"] detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) - get_waiter(client, 'vpn_gateway_detached').wait(VpnGatewayIds=[vpn_gateway_id]) + get_waiter(client, "vpn_gateway_detached").wait(VpnGatewayIds=[vpn_gateway_id]) attached_vgw = attach_vgw(client, module, vpn_gateway_id) changed = True else: @@ -386,10 +389,10 @@ def ensure_vgw_present(client, module): else: existing_vgw = find_vgw(client, module, [vpn_gateway_id]) - if existing_vgw[0]['VpcAttachments'] != []: - if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': + if existing_vgw[0]["VpcAttachments"] != []: + if existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": # detach the vpc from the vgw - vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] + vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) changed = True @@ -397,10 +400,10 @@ def ensure_vgw_present(client, module): # create a new vgw new_vgw = create_vgw(client, module) changed = True - vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId'] + vpn_gateway_id = new_vgw["VpnGateway"]["VpnGatewayId"] # if a vpc-id was supplied, attempt to attach it to the vgw - if params['VpcId']: 
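# The attachment-convergence rule applied above, in isolation: an existing
# attachment is kept only when it is in state "attached" and points at the
# requested VPC; a mismatch triggers a detach (waiting on the
# "vpn_gateway_detached" waiter) before the new attach, and no usable attachment
# means attaching directly. A rough predicate sketching that decision:
def _sketch_needs_reattach(attachments, wanted_vpc_id):
    if not attachments or attachments[0]["State"] != "attached":
        return True  # nothing usable attached; just attach
    return attachments[0]["VpcId"] != wanted_vpc_id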
+ if params["VpcId"]: attached_vgw = attach_vgw(client, module, vpn_gateway_id) changed = True @@ -411,45 +414,46 @@ def ensure_vgw_present(client, module): def ensure_vgw_absent(client, module): - # If an existing vgw name and type matches our args, then a match is considered to have been # found and we will take steps to delete it. changed = False params = dict() result = dict() - params['Name'] = module.params.get('name') - params['VpcId'] = module.params.get('vpc_id') - params['Type'] = module.params.get('type') - params['Tags'] = module.params.get('tags') - params['VpnGatewayIds'] = module.params.get('vpn_gateway_id') + params["Name"] = module.params.get("name") + params["VpcId"] = module.params.get("vpc_id") + params["Type"] = module.params.get("type") + params["Tags"] = module.params.get("tags") + params["VpnGatewayIds"] = module.params.get("vpn_gateway_id") # check if a gateway matching our module args already exists - if params['VpnGatewayIds']: - existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']]) - if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted': + if params["VpnGatewayIds"]: + existing_vgw_with_id = find_vgw(client, module, [params["VpnGatewayIds"]]) + if existing_vgw_with_id != [] and existing_vgw_with_id[0]["State"] != "deleted": existing_vgw = existing_vgw_with_id - if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': - if params['VpcId']: - if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']: - module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console') + if existing_vgw[0]["VpcAttachments"] != [] and existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": + if params["VpcId"]: + if params["VpcId"] != existing_vgw[0]["VpcAttachments"][0]["VpcId"]: + module.fail_json( + msg="The vpc-id provided does not match the vpc-id currently attached - please check the AWS console" + ) else: # detach the vpc from the vgw - detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId']) - deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + detach_vgw(client, module, params["VpnGatewayIds"], params["VpcId"]) + deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) changed = True else: # attempt to detach any attached vpcs - vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] - detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach) - deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] + detach_vgw(client, module, params["VpnGatewayIds"], vpc_to_detach) + deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) changed = True else: # no vpc's are attached so attempt to delete the vgw - deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) changed = True else: @@ -458,20 +462,22 @@ def ensure_vgw_absent(client, module): else: # Check that a name and type argument has been supplied if no vgw-id - if not module.params.get('name') or not module.params.get('type'): - module.fail_json(msg='A name and type is required when no vgw-id and a status of \'absent\' is supplied') + if not module.params.get("name") or not module.params.get("type"): + module.fail_json(msg="A name and type is required when no vgw-id and a status of 'absent' is supplied") existing_vgw = find_vgw(client, module) - if 
existing_vgw != [] and existing_vgw[0]['State'] != 'deleted': - vpn_gateway_id = existing_vgw[0]['VpnGatewayId'] - if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': - if params['VpcId']: - if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']: - module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console') + if existing_vgw != [] and existing_vgw[0]["State"] != "deleted": + vpn_gateway_id = existing_vgw[0]["VpnGatewayId"] + if existing_vgw[0]["VpcAttachments"] != [] and existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": + if params["VpcId"]: + if params["VpcId"] != existing_vgw[0]["VpcAttachments"][0]["VpcId"]: + module.fail_json( + msg="The vpc-id provided does not match the vpc-id currently attached - please check the AWS console" + ) else: # detach the vpc from the vgw - detach_vgw(client, module, vpn_gateway_id, params['VpcId']) + detach_vgw(client, module, vpn_gateway_id, params["VpcId"]) # now that the vpc has been detached, delete the vgw deleted_vgw = delete_vgw(client, module, vpn_gateway_id) @@ -479,7 +485,7 @@ def ensure_vgw_absent(client, module): else: # attempt to detach any attached vpcs - vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] + vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) changed = True @@ -501,29 +507,28 @@ def ensure_vgw_absent(client, module): def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), name=dict(), vpn_gateway_id=dict(), vpc_id=dict(), - asn=dict(type='int'), - wait_timeout=dict(type='int', default=320), - type=dict(default='ipsec.1', choices=['ipsec.1']), - tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + asn=dict(type="int"), + wait_timeout=dict(type="int", default=320), + type=dict(default="ipsec.1", choices=["ipsec.1"]), + tags=dict(default=None, required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['name']]]) + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[["state", "present", ["name"]]]) - state = module.params.get('state').lower() + state = module.params.get("state").lower() - client = module.client('ec2', retry_decorator=VGWRetry.jittered_backoff(retries=10)) + client = module.client("ec2", retry_decorator=VGWRetry.jittered_backoff(retries=10)) - if state == 'present': + if state == "present": (changed, results) = ensure_vgw_present(client, module) else: (changed, results) = ensure_vgw_absent(client, module) module.exit_json(changed=changed, vgw=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py index fcb520cf0..6ab311c03 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import 
absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_vgw_info version_added: 1.0.0 @@ -28,12 +26,12 @@ options: author: - "Nick Aslanidis (@naslanidis)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all virtual gateways for an account or profile @@ -47,7 +45,7 @@ EXAMPLES = r''' region: ap-southeast-2 profile: production filters: - "tag:Name": "main-virt-gateway" + "tag:Name": "main-virt-gateway" register: vgw_info - name: Gather information about a specific virtual gateway by VpnGatewayIds @@ -56,9 +54,9 @@ EXAMPLES = r''' profile: production vpn_gateway_ids: vgw-c432f6a7 register: vgw_info -''' +""" -RETURN = r''' +RETURN = r""" virtual_gateways: description: The virtual gateways for the account. returned: always @@ -121,7 +119,7 @@ virtual_gateways: type: dict returned: success example: {"MyKey": "MyValue"} -''' +""" try: import botocore @@ -130,19 +128,20 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_virtual_gateway_info(virtual_gateway): - tags = virtual_gateway.get('Tags', []) + tags = virtual_gateway.get("Tags", []) resource_tags = boto3_tag_list_to_ansible_dict(tags) virtual_gateway_info = dict( - VpnGatewayId=virtual_gateway['VpnGatewayId'], - State=virtual_gateway['State'], - Type=virtual_gateway['Type'], - VpcAttachments=virtual_gateway['VpcAttachments'], + VpnGatewayId=virtual_gateway["VpnGatewayId"], + State=virtual_gateway["State"], + Type=virtual_gateway["Type"], + VpcAttachments=virtual_gateway["VpcAttachments"], Tags=tags, ResourceTags=resource_tags, ) @@ -152,32 +151,34 @@ def get_virtual_gateway_info(virtual_gateway): def list_virtual_gateways(client, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) if module.params.get("vpn_gateway_ids"): - params['VpnGatewayIds'] = module.params.get("vpn_gateway_ids") + params["VpnGatewayIds"] = module.params.get("vpn_gateway_ids") try: all_virtual_gateways = client.describe_vpn_gateways(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to list gateways") - return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw), ignore_list=['ResourceTags']) - for vgw in all_virtual_gateways['VpnGateways']] + return [ + camel_dict_to_snake_dict(get_virtual_gateway_info(vgw), ignore_list=["ResourceTags"]) + for vgw in all_virtual_gateways["VpnGateways"] + ] def main(): argument_spec = dict( 
- filters=dict(type='dict', default=dict()), - vpn_gateway_ids=dict(type='list', default=None, elements='str'), + filters=dict(type="dict", default=dict()), + vpn_gateway_ids=dict(type="list", default=None, elements="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) try: - connection = module.client('ec2') + connection = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") # call your function here results = list_virtual_gateways(connection, module) @@ -185,5 +186,5 @@ def main(): module.exit_json(virtual_gateways=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py index 77a994aaa..abc97f796 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_vpn version_added: 1.0.0 @@ -14,11 +12,6 @@ short_description: Create, modify, and delete EC2 VPN connections description: - This module creates, modifies, and deletes VPN connections. Idempotence is achieved by using the filters option or specifying the VPN connection identifier. -extends_documentation_fragment: - - amazon.aws.ec2 - - amazon.aws.aws - - amazon.aws.boto3 - - amazon.aws.tags author: - "Sloane Hertel (@s-hertel)" options: @@ -42,6 +35,7 @@ options: vpn_gateway_id: description: - The ID of the virtual private gateway. + - Mutually exclusive with I(transit_gateway_id). type: str vpn_connection_id: description: @@ -53,6 +47,12 @@ options: default: False type: bool required: false + transit_gateway_id: + description: + - The ID of the transit gateway. + - Mutually exclusive with I(vpn_gateway_id). + type: str + version_added: 6.2.0 tunnel_options: description: - An optional list object containing no more than two dict members, each of which may contain I(TunnelInsideCidr) @@ -135,18 +135,28 @@ options: required: false type: int default: 15 -''' +extends_documentation_fragment: + - amazon.aws.region.modules + - amazon.aws.common.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" EXAMPLES = r""" -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. +# Note: These examples do not set authentication details, see the AWS Guide for details. 
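# A minimal sketch of the tunnel_options list documented above: no more than
# two dict members, each of which may carry TunnelInsideCidr. The CIDR values
# below are illustrative assumptions.
- name: Create a static-routes VPN connection with explicit tunnel options
  community.aws.ec2_vpc_vpn:
    state: present
    vpn_gateway_id: vgw-XXXXXXXX
    customer_gateway_id: cgw-XXXXXXXX
    static_only: true
    tunnel_options:
      - TunnelInsideCidr: 169.254.100.0/30
      - TunnelInsideCidr: 169.254.101.0/30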
-- name: create a VPN connection +- name: create a VPN connection with vpn_gateway_id community.aws.ec2_vpc_vpn: state: present vpn_gateway_id: vgw-XXXXXXXX customer_gateway_id: cgw-XXXXXXXX +- name: Attach a vpn connection to transit gateway + community.aws.ec2_vpc_vpn: + state: present + transit_gateway_id: tgw-XXXXXXXX + customer_gateway_id: cgw-XXXXXXXX + - name: modify VPN connection tags community.aws.ec2_vpc_vpn: state: present @@ -233,6 +243,12 @@ vpn_gateway_id: returned: I(state=present) sample: vpn_gateway_id: vgw-cb0ae2a2 +transit_gateway_id: + description: The transit gateway id to which the vpn connection can be attached. + type: str + returned: I(state=present) + sample: + transit_gateway_id: tgw-cb0ae2a2 options: description: The VPN connection options (currently only containing static_routes_only). type: complex @@ -293,19 +309,23 @@ vpn_connection_id: vpn_connection_id: vpn-781e0e19 """ -from ansible.module_utils._text import to_text -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - try: - from botocore.exceptions import BotoCoreError, ClientError, WaiterError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import WaiterError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class VPNConnectionException(Exception): def __init__(self, msg, exception=None): @@ -319,11 +339,14 @@ class VPNConnectionException(Exception): class VPNRetry(AWSRetry): @staticmethod def status_code_from_exception(error): - return (error.response['Error']['Code'], error.response['Error']['Message'],) + return ( + error.response["Error"]["Code"], + error.response["Error"]["Message"], + ) @staticmethod def found(response_code, catch_extra_error_codes=None): - retry_on = ['The maximum number of mutating objects has been reached.'] + retry_on = ["The maximum number of mutating objects has been reached."] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) @@ -338,14 +361,14 @@ class VPNRetry(AWSRetry): def find_connection(connection, module_params, vpn_connection_id=None): - ''' Looks for a unique VPN connection. Uses find_connection_response() to return the connection found, None, - or raise an error if there were multiple viable connections. ''' + """Looks for a unique VPN connection. 
Uses find_connection_response() to return the connection found, None, + or raise an error if there were multiple viable connections.""" - filters = module_params.get('filters') + filters = module_params.get("filters") # vpn_connection_id may be provided via module option; takes precedence over any filter values - if not vpn_connection_id and module_params.get('vpn_connection_id'): - vpn_connection_id = module_params.get('vpn_connection_id') + if not vpn_connection_id and module_params.get("vpn_connection_id"): + vpn_connection_id = module_params.get("vpn_connection_id") if not isinstance(vpn_connection_id, list) and vpn_connection_id: vpn_connection_id = [to_text(vpn_connection_id)] @@ -360,14 +383,13 @@ def find_connection(connection, module_params, vpn_connection_id=None): # see if there is a unique matching connection try: if vpn_connection_id: - existing_conn = connection.describe_vpn_connections(aws_retry=True, - VpnConnectionIds=vpn_connection_id, - Filters=formatted_filter) + existing_conn = connection.describe_vpn_connections( + aws_retry=True, VpnConnectionIds=vpn_connection_id, Filters=formatted_filter + ) else: existing_conn = connection.describe_vpn_connections(aws_retry=True, Filters=formatted_filter) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed while describing VPN connection.", - exception=e) + raise VPNConnectionException(msg="Failed while describing VPN connection.", exception=e) return find_connection_response(connections=existing_conn) @@ -375,48 +397,56 @@ def find_connection(connection, module_params, vpn_connection_id=None): def add_routes(connection, vpn_connection_id, routes_to_add): for route in routes_to_add: try: - connection.create_vpn_connection_route(aws_retry=True, - VpnConnectionId=vpn_connection_id, - DestinationCidrBlock=route) + connection.create_vpn_connection_route( + aws_retry=True, VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id), - exception=e) + raise VPNConnectionException( + msg=f"Failed while adding route {route} to the VPN connection {vpn_connection_id}.", + exception=e, + ) def remove_routes(connection, vpn_connection_id, routes_to_remove): for route in routes_to_remove: try: - connection.delete_vpn_connection_route(aws_retry=True, - VpnConnectionId=vpn_connection_id, - DestinationCidrBlock=route) + connection.delete_vpn_connection_route( + aws_retry=True, VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id), - exception=e) + raise VPNConnectionException( + msg=f"Failed to remove route {route} from the VPN connection {vpn_connection_id}.", + exception=e, + ) def create_filter(module_params, provided_filters): - """ Creates a filter using the user-specified parameters and unmodifiable options that may have been specified in the task """ - boto3ify_filter = {'cgw-config': 'customer-gateway-configuration', - 'static-routes-only': 'option.static-routes-only', - 'cidr': 'route.destination-cidr-block', - 'bgp': 'bgp-asn', - 'vpn': 'vpn-connection-id', - 'vgw': 'vpn-gateway-id', - 'tag-keys': 'tag-key', - 'tag-values': 'tag-value', - 'tags': 'tag', - 'cgw': 'customer-gateway-id'} + """Creates a filter using the user-specified parameters and 
unmodifiable options that may have been specified in the task""" + boto3ify_filter = { + "cgw-config": "customer-gateway-configuration", + "static-routes-only": "option.static-routes-only", + "cidr": "route.destination-cidr-block", + "bgp": "bgp-asn", + "vpn": "vpn-connection-id", + "vgw": "vpn-gateway-id", + "tag-keys": "tag-key", + "tag-values": "tag-value", + "tags": "tag", + "cgw": "customer-gateway-id", + } # unmodifiable options and their filter name counterpart - param_to_filter = {"customer_gateway_id": "customer-gateway-id", - "vpn_gateway_id": "vpn-gateway-id", - "vpn_connection_id": "vpn-connection-id"} + param_to_filter = { + "customer_gateway_id": "customer-gateway-id", + "vpn_gateway_id": "vpn-gateway-id", + "transit_gateway_id": "transit-gateway-id", + "vpn_connection_id": "vpn-connection-id", + } flat_filter_dict = {} formatted_filter = [] for raw_param in dict(provided_filters): - # fix filter names to be recognized by boto3 if raw_param in boto3ify_filter: param = boto3ify_filter[raw_param] @@ -424,17 +454,17 @@ def create_filter(module_params, provided_filters): elif raw_param in list(boto3ify_filter.items()): param = raw_param else: - raise VPNConnectionException(msg="{0} is not a valid filter.".format(raw_param)) + raise VPNConnectionException(msg=f"{raw_param} is not a valid filter.") # reformat filters with special formats - if param == 'tag': + if param == "tag": for key in provided_filters[param]: - formatted_key = 'tag:' + key + formatted_key = "tag:" + key if isinstance(provided_filters[param][key], list): flat_filter_dict[formatted_key] = str(provided_filters[param][key]) else: flat_filter_dict[formatted_key] = [str(provided_filters[param][key])] - elif param == 'option.static-routes-only': + elif param == "option.static-routes-only": flat_filter_dict[param] = [str(provided_filters[param]).lower()] else: if isinstance(provided_filters[param], list): @@ -448,25 +478,25 @@ def create_filter(module_params, provided_filters): flat_filter_dict[param_to_filter[param]] = [module_params.get(param)] # change the flat dict into something boto3 will understand - formatted_filter = [{'Name': key, 'Values': value} for key, value in flat_filter_dict.items()] + formatted_filter = [{"Name": key, "Values": value} for key, value in flat_filter_dict.items()] return formatted_filter def find_connection_response(connections=None): - """ Determine if there is a viable unique match in the connections described. Returns the unique VPN connection if one is found, - returns None if the connection does not exist, raise an error if multiple matches are found. """ + """Determine if there is a viable unique match in the connections described. 
Returns the unique VPN connection if one is found, + returns None if the connection does not exist, raise an error if multiple matches are found.""" # Found no connections - if not connections or 'VpnConnections' not in connections: + if not connections or "VpnConnections" not in connections: return None # Too many results - elif connections and len(connections['VpnConnections']) > 1: + elif connections and len(connections["VpnConnections"]) > 1: viable = [] - for each in connections['VpnConnections']: + for each in connections["VpnConnections"]: # deleted connections are not modifiable - if each['State'] not in ("deleted", "deleting"): + if each["State"] not in ("deleted", "deleting"): viable.append(each) if len(viable) == 1: # Found one viable result; return unique match @@ -475,20 +505,34 @@ def find_connection_response(connections=None): # Found a result but it was deleted already; since there was only one viable result create a new one return None else: - raise VPNConnectionException(msg="More than one matching VPN connection was found. " - "To modify or delete a VPN please specify vpn_connection_id or add filters.") + raise VPNConnectionException( + msg=( + "More than one matching VPN connection was found. " + "To modify or delete a VPN please specify vpn_connection_id or add filters." + ) + ) # Found unique match - elif connections and len(connections['VpnConnections']) == 1: + elif connections and len(connections["VpnConnections"]) == 1: # deleted connections are not modifiable - if connections['VpnConnections'][0]['State'] not in ("deleted", "deleting"): - return connections['VpnConnections'][0] - - -def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type, max_attempts, delay, tunnel_options=None): - """ Creates a VPN connection """ - - options = {'StaticRoutesOnly': static_only} + if connections["VpnConnections"][0]["State"] not in ("deleted", "deleting"): + return connections["VpnConnections"][0] + + +def create_connection( + connection, + customer_gateway_id, + static_only, + vpn_gateway_id, + transit_gateway_id, + connection_type, + max_attempts, + delay, + tunnel_options=None, +): + """Creates a VPN connection""" + + options = {"StaticRoutesOnly": static_only} if tunnel_options and len(tunnel_options) <= 2: t_opt = [] for m in tunnel_options: @@ -498,108 +542,106 @@ def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_ raise TypeError("non-dict list member") t_opt.append(m) if t_opt: - options['TunnelOptions'] = t_opt + options["TunnelOptions"] = t_opt + + if not (customer_gateway_id and (vpn_gateway_id or transit_gateway_id)): + raise VPNConnectionException( + msg=( + "No matching connection was found. To create a new connection you must provide " + "customer_gateway_id and one of either transit_gateway_id or vpn_gateway_id." + ) + ) + vpn_connection_params = {"Type": connection_type, "CustomerGatewayId": customer_gateway_id, "Options": options} + if vpn_gateway_id: + vpn_connection_params["VpnGatewayId"] = vpn_gateway_id + if transit_gateway_id: + vpn_connection_params["TransitGatewayId"] = transit_gateway_id - if not (customer_gateway_id and vpn_gateway_id): - raise VPNConnectionException(msg="No matching connection was found. 
To create a new connection you must provide " - "both vpn_gateway_id and customer_gateway_id.") try: - vpn = connection.create_vpn_connection(Type=connection_type, - CustomerGatewayId=customer_gateway_id, - VpnGatewayId=vpn_gateway_id, - Options=options) - connection.get_waiter('vpn_connection_available').wait( - VpnConnectionIds=[vpn['VpnConnection']['VpnConnectionId']], - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts} + vpn = connection.create_vpn_connection(**vpn_connection_params) + connection.get_waiter("vpn_connection_available").wait( + VpnConnectionIds=[vpn["VpnConnection"]["VpnConnectionId"]], + WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, ) except WaiterError as e: - raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be available".format(vpn['VpnConnection']['VpnConnectionId']), - exception=e) + raise VPNConnectionException( + msg=f"Failed to wait for VPN connection {vpn['VpnConnection']['VpnConnectionId']} to be available", + exception=e, + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to create VPN connection", - exception=e) + raise VPNConnectionException(msg="Failed to create VPN connection", exception=e) - return vpn['VpnConnection'] + return vpn["VpnConnection"] def delete_connection(connection, vpn_connection_id, delay, max_attempts): - """ Deletes a VPN connection """ + """Deletes a VPN connection""" try: connection.delete_vpn_connection(aws_retry=True, VpnConnectionId=vpn_connection_id) - connection.get_waiter('vpn_connection_deleted').wait( - VpnConnectionIds=[vpn_connection_id], - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts} + connection.get_waiter("vpn_connection_deleted").wait( + VpnConnectionIds=[vpn_connection_id], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts} ) except WaiterError as e: - raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id), - exception=e) + raise VPNConnectionException( + msg=f"Failed to wait for VPN connection {vpn_connection_id} to be removed", exception=e + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id), - exception=e) + raise VPNConnectionException(msg=f"Failed to delete the VPN connection: {vpn_connection_id}", exception=e) def add_tags(connection, vpn_connection_id, add): try: - connection.create_tags(aws_retry=True, - Resources=[vpn_connection_id], - Tags=add) + connection.create_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=add) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add), - exception=e) + raise VPNConnectionException(msg=f"Failed to add the tags: {add}.", exception=e) def remove_tags(connection, vpn_connection_id, remove): # format tags since they are a list in the format ['tag1', 'tag2', 'tag3'] - key_dict_list = [{'Key': tag} for tag in remove] + key_dict_list = [{"Key": tag} for tag in remove] try: - connection.delete_tags(aws_retry=True, - Resources=[vpn_connection_id], - Tags=key_dict_list) + connection.delete_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=key_dict_list) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove), - exception=e) + raise VPNConnectionException(msg=f"Failed to remove the tags: {remove}.", exception=e) def check_for_update(connection, module_params, vpn_connection_id): 
- """ Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change. """ - tags = module_params.get('tags') - routes = module_params.get('routes') - purge_tags = module_params.get('purge_tags') - purge_routes = module_params.get('purge_routes') + """Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change.""" + tags = module_params.get("tags") + routes = module_params.get("routes") + purge_tags = module_params.get("purge_tags") + purge_routes = module_params.get("purge_routes") vpn_connection = find_connection(connection, module_params, vpn_connection_id=vpn_connection_id) current_attrs = camel_dict_to_snake_dict(vpn_connection) # Initialize changes dict - changes = {'tags_to_add': [], - 'tags_to_remove': [], - 'routes_to_add': [], - 'routes_to_remove': []} + changes = {"tags_to_add": [], "tags_to_remove": [], "routes_to_add": [], "routes_to_remove": []} # Get changes to tags - current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get('tags', []), u'key', u'value') + current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get("tags", []), "key", "value") if tags is None: - changes['tags_to_remove'] = [] - changes['tags_to_add'] = [] + changes["tags_to_remove"] = [] + changes["tags_to_add"] = [] else: - tags_to_add, changes['tags_to_remove'] = compare_aws_tags(current_tags, tags, purge_tags) - changes['tags_to_add'] = ansible_dict_to_boto3_tag_list(tags_to_add) + tags_to_add, changes["tags_to_remove"] = compare_aws_tags(current_tags, tags, purge_tags) + changes["tags_to_add"] = ansible_dict_to_boto3_tag_list(tags_to_add) # Get changes to routes - if 'Routes' in vpn_connection: - current_routes = [route['DestinationCidrBlock'] for route in vpn_connection['Routes']] + if "Routes" in vpn_connection: + current_routes = [route["DestinationCidrBlock"] for route in vpn_connection["Routes"]] if purge_routes: - changes['routes_to_remove'] = [old_route for old_route in current_routes if old_route not in routes] - changes['routes_to_add'] = [new_route for new_route in routes if new_route not in current_routes] + changes["routes_to_remove"] = [old_route for old_route in current_routes if old_route not in routes] + changes["routes_to_add"] = [new_route for new_route in routes if new_route not in current_routes] # Check if nonmodifiable attributes are attempted to be modified for attribute in current_attrs: if attribute in ("tags", "routes", "state"): continue - elif attribute == 'options': - will_be = module_params.get('static_only', None) - is_now = bool(current_attrs[attribute]['static_routes_only']) - attribute = 'static_only' - elif attribute == 'type': + elif attribute == "options": + will_be = module_params.get("static_only", None) + is_now = bool(current_attrs[attribute]["static_routes_only"]) + attribute = "static_only" + elif attribute == "type": will_be = module_params.get("connection_type", None) is_now = current_attrs[attribute] else: @@ -607,110 +649,118 @@ def check_for_update(connection, module_params, vpn_connection_id): will_be = module_params.get(attribute, None) if will_be is not None and to_text(will_be) != to_text(is_now): - raise VPNConnectionException(msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN " - "connection attributes are tags and routes. 
The value you tried to change it to " - "is {2}.".format(attribute, is_now, will_be)) + raise VPNConnectionException( + msg=( + f"You cannot modify {attribute}, the current value of which is {is_now}. Modifiable VPN connection" + f" attributes are tags and routes. The value you tried to change it to is {will_be}." + ) + ) return changes def make_changes(connection, vpn_connection_id, changes): - """ changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove', - the values of which are lists (generated by check_for_update()). + """changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove', + the values of which are lists (generated by check_for_update()). """ changed = False - if changes['tags_to_add']: + if changes["tags_to_add"]: changed = True - add_tags(connection, vpn_connection_id, changes['tags_to_add']) + add_tags(connection, vpn_connection_id, changes["tags_to_add"]) - if changes['tags_to_remove']: + if changes["tags_to_remove"]: changed = True - remove_tags(connection, vpn_connection_id, changes['tags_to_remove']) + remove_tags(connection, vpn_connection_id, changes["tags_to_remove"]) - if changes['routes_to_add']: + if changes["routes_to_add"]: changed = True - add_routes(connection, vpn_connection_id, changes['routes_to_add']) + add_routes(connection, vpn_connection_id, changes["routes_to_add"]) - if changes['routes_to_remove']: + if changes["routes_to_remove"]: changed = True - remove_routes(connection, vpn_connection_id, changes['routes_to_remove']) + remove_routes(connection, vpn_connection_id, changes["routes_to_remove"]) return changed def get_check_mode_results(connection, module_params, vpn_connection_id=None, current_state=None): - """ Returns the changes that would be made to a VPN Connection """ - state = module_params.get('state') - if state == 'absent': + """Returns the changes that would be made to a VPN Connection""" + state = module_params.get("state") + if state == "absent": if vpn_connection_id: return True, {} else: return False, {} changed = False - results = {'customer_gateway_configuration': '', - 'customer_gateway_id': module_params.get('customer_gateway_id'), - 'vpn_gateway_id': module_params.get('vpn_gateway_id'), - 'options': {'static_routes_only': module_params.get('static_only')}, - 'routes': [module_params.get('routes')]} + results = { + "customer_gateway_configuration": "", + "customer_gateway_id": module_params.get("customer_gateway_id"), + "vpn_gateway_id": module_params.get("vpn_gateway_id"), + "transit_gateway_id": module_params.get("transit_gateway_id"), + "options": {"static_routes_only": module_params.get("static_only")}, + "routes": [module_params.get("routes")], + } # get combined current tags and tags to set - present_tags = module_params.get('tags') + present_tags = module_params.get("tags") if present_tags is None: pass - elif current_state and 'Tags' in current_state: - current_tags = boto3_tag_list_to_ansible_dict(current_state['Tags']) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, present_tags, module_params.get('purge_tags')) + elif current_state and "Tags" in current_state: + current_tags = boto3_tag_list_to_ansible_dict(current_state["Tags"]) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, present_tags, module_params.get("purge_tags")) changed |= bool(tags_to_remove) or bool(tags_to_add) - if module_params.get('purge_tags'): + if module_params.get("purge_tags"): current_tags = {} current_tags.update(present_tags) - 
results['tags'] = current_tags - elif module_params.get('tags'): + results["tags"] = current_tags + elif module_params.get("tags"): changed = True if present_tags: - results['tags'] = present_tags + results["tags"] = present_tags # get combined current routes and routes to add - present_routes = module_params.get('routes') - if current_state and 'Routes' in current_state: - current_routes = [route['DestinationCidrBlock'] for route in current_state['Routes']] - if module_params.get('purge_routes'): + present_routes = module_params.get("routes") + if current_state and "Routes" in current_state: + current_routes = [route["DestinationCidrBlock"] for route in current_state["Routes"]] + if module_params.get("purge_routes"): if set(current_routes) != set(present_routes): changed = True elif set(present_routes) != set(current_routes): if not set(present_routes) < set(current_routes): changed = True present_routes.extend([route for route in current_routes if route not in present_routes]) - elif module_params.get('routes'): + elif module_params.get("routes"): changed = True - results['routes'] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes] + results["routes"] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes] # return the vpn_connection_id if it's known if vpn_connection_id: - results['vpn_connection_id'] = vpn_connection_id + results["vpn_connection_id"] = vpn_connection_id else: changed = True - results['vpn_connection_id'] = 'vpn-XXXXXXXX' + results["vpn_connection_id"] = "vpn-XXXXXXXX" return changed, results def ensure_present(connection, module_params, check_mode=False): - """ Creates and adds tags to a VPN connection. If the connection already exists update tags. """ + """Creates and adds tags to a VPN connection. If the connection already exists update tags.""" vpn_connection = find_connection(connection, module_params) changed = False - delay = module_params.get('delay') - max_attempts = module_params.get('wait_timeout') // delay + delay = module_params.get("delay") + max_attempts = module_params.get("wait_timeout") // delay # No match but vpn_connection_id was specified. - if not vpn_connection and module_params.get('vpn_connection_id'): - raise VPNConnectionException(msg="There is no VPN connection available or pending with that id. Did you delete it?") + if not vpn_connection and module_params.get("vpn_connection_id"): + raise VPNConnectionException( + msg="There is no VPN connection available or pending with that id. Did you delete it?" + ) # Unique match was found. Check if attributes provided differ. 
elif vpn_connection: - vpn_connection_id = vpn_connection['VpnConnectionId'] + vpn_connection_id = vpn_connection["VpnConnectionId"] # check_for_update returns a dict with the keys tags_to_add, tags_to_remove, routes_to_add, routes_to_remove changes = check_for_update(connection, module_params, vpn_connection_id) if check_mode: @@ -722,38 +772,43 @@ def ensure_present(connection, module_params, check_mode=False): changed = True if check_mode: return get_check_mode_results(connection, module_params) - vpn_connection = create_connection(connection, - customer_gateway_id=module_params.get('customer_gateway_id'), - static_only=module_params.get('static_only'), - vpn_gateway_id=module_params.get('vpn_gateway_id'), - connection_type=module_params.get('connection_type'), - tunnel_options=module_params.get('tunnel_options'), - max_attempts=max_attempts, - delay=delay) - changes = check_for_update(connection, module_params, vpn_connection['VpnConnectionId']) - make_changes(connection, vpn_connection['VpnConnectionId'], changes) + vpn_connection = create_connection( + connection, + customer_gateway_id=module_params.get("customer_gateway_id"), + static_only=module_params.get("static_only"), + vpn_gateway_id=module_params.get("vpn_gateway_id"), + transit_gateway_id=module_params.get("transit_gateway_id"), + connection_type=module_params.get("connection_type"), + tunnel_options=module_params.get("tunnel_options"), + max_attempts=max_attempts, + delay=delay, + ) + changes = check_for_update(connection, module_params, vpn_connection["VpnConnectionId"]) + make_changes(connection, vpn_connection["VpnConnectionId"], changes) # get latest version if a change has been made and make tags output nice before returning it if vpn_connection: - vpn_connection = find_connection(connection, module_params, vpn_connection['VpnConnectionId']) - if 'Tags' in vpn_connection: - vpn_connection['Tags'] = boto3_tag_list_to_ansible_dict(vpn_connection['Tags']) + vpn_connection = find_connection(connection, module_params, vpn_connection["VpnConnectionId"]) + if "Tags" in vpn_connection: + vpn_connection["Tags"] = boto3_tag_list_to_ansible_dict(vpn_connection["Tags"]) return changed, vpn_connection def ensure_absent(connection, module_params, check_mode=False): - """ Deletes a VPN connection if it exists. 
""" + """Deletes a VPN connection if it exists.""" vpn_connection = find_connection(connection, module_params) if check_mode: - return get_check_mode_results(connection, module_params, vpn_connection['VpnConnectionId'] if vpn_connection else None) + return get_check_mode_results( + connection, module_params, vpn_connection["VpnConnectionId"] if vpn_connection else None + ) - delay = module_params.get('delay') - max_attempts = module_params.get('wait_timeout') // delay + delay = module_params.get("delay") + max_attempts = module_params.get("wait_timeout") // delay if vpn_connection: - delete_connection(connection, vpn_connection['VpnConnectionId'], delay=delay, max_attempts=max_attempts) + delete_connection(connection, vpn_connection["VpnConnectionId"], delay=delay, max_attempts=max_attempts) changed = True else: changed = False @@ -763,32 +818,40 @@ def ensure_absent(connection, module_params, check_mode=False): def main(): argument_spec = dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - filters=dict(type='dict', default={}), - vpn_gateway_id=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - connection_type=dict(default='ipsec.1', type='str'), - tunnel_options=dict(no_log=True, type='list', default=[], elements='dict'), - static_only=dict(default=False, type='bool'), - customer_gateway_id=dict(type='str'), - vpn_connection_id=dict(type='str'), - purge_tags=dict(type='bool', default=True), - routes=dict(type='list', default=[], elements='str'), - purge_routes=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=600), - delay=dict(type='int', default=15), + state=dict(type="str", default="present", choices=["present", "absent"]), + filters=dict(type="dict", default={}), + vpn_gateway_id=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + connection_type=dict(default="ipsec.1", type="str"), + transit_gateway_id=dict(type="str"), + tunnel_options=dict(no_log=True, type="list", default=[], elements="dict"), + static_only=dict(default=False, type="bool"), + customer_gateway_id=dict(type="str"), + vpn_connection_id=dict(type="str"), + purge_tags=dict(type="bool", default=True), + routes=dict(type="list", default=[], elements="str"), + purge_routes=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=600), + delay=dict(type="int", default=15), + ) + mutually_exclusive = [ + ["vpn_gateway_id", "transit_gateway_id"], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) - connection = module.client('ec2', retry_decorator=VPNRetry.jittered_backoff(retries=10)) + connection = module.client("ec2", retry_decorator=VPNRetry.jittered_backoff(retries=10)) - state = module.params.get('state') + state = module.params.get("state") parameters = dict(module.params) try: - if state == 'present': + if state == "present": changed, response = ensure_present(connection, parameters, module.check_mode) - elif state == 'absent': + elif state == "absent": changed, response = ensure_absent(connection, parameters, module.check_mode) except VPNConnectionException as e: if e.exception: @@ -799,5 +862,5 @@ def main(): module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py 
b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py index c7a71f154..d304e4568 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_vpn_info version_added: 1.0.0 short_description: Gather information about VPN Connections in AWS. description: - - Gather information about VPN Connections in AWS. -author: Madhura Naniwadekar (@Madhura-CSI) + - Gather information about VPN Connections in AWS. +author: + - Madhura Naniwadekar (@Madhura-CSI) options: filters: description: @@ -30,13 +29,12 @@ options: elements: str default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all vpn connections community.aws.ec2_vpc_vpn_info: @@ -52,9 +50,9 @@ EXAMPLES = r''' filters: vpn-gateway-id: vgw-cbe66beb register: vpn_conn_info -''' +""" -RETURN = r''' +RETURN = r""" vpn_connections: description: List of one or more VPN Connections. returned: always @@ -158,30 +156,33 @@ vpn_connections: returned: always type: str sample: vgw-cbe56bfb -''' +""" import json + try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ) +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def date_handler(obj): - return obj.isoformat() if hasattr(obj, 'isoformat') else obj + return obj.isoformat() if hasattr(obj, "isoformat") else obj def list_vpn_connections(connection, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - params['VpnConnectionIds'] = module.params.get('vpn_connection_ids') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + params["VpnConnectionIds"] = module.params.get("vpn_connection_ids") try: result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler)) @@ -189,28 +190,29 @@ def list_vpn_connections(connection, module): module.fail_json_aws(e, msg="Cannot validate JSON data") except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not describe customer gateways") - snaked_vpn_connections = 
[camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result['VpnConnections']] + snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result["VpnConnections"]] if snaked_vpn_connections: for vpn_connection in snaked_vpn_connections: - vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', [])) + vpn_connection["tags"] = boto3_tag_list_to_ansible_dict(vpn_connection.get("tags", [])) module.exit_json(changed=False, vpn_connections=snaked_vpn_connections) def main(): - argument_spec = dict( - vpn_connection_ids=dict(default=[], type='list', elements='str'), - filters=dict(default={}, type='dict') + vpn_connection_ids=dict(default=[], type="list", elements="str"), + filters=dict(default={}, type="dict"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['vpn_connection_ids', 'filters']], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[["vpn_connection_ids", "filters"]], + supports_check_mode=True, + ) - connection = module.client('ec2') + connection = module.client("ec2") list_vpn_connections(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ec2_win_password.py b/ansible_collections/community/aws/plugins/modules/ec2_win_password.py index 9b92c3e4f..a9ca8e94c 100644 --- a/ansible_collections/community/aws/plugins/modules/ec2_win_password.py +++ b/ansible_collections/community/aws/plugins/modules/ec2_win_password.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_win_password version_added: 1.0.0 short_description: Gets the default administrator password for EC2 Windows instances description: - - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)). -author: "Rick Mendes (@rickmendes)" + - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)). 
+author: + - "Rick Mendes (@rickmendes)" options: instance_id: description: @@ -48,16 +47,18 @@ options: default: 120 type: int +requirements: + - cryptography + extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -requirements: -- cryptography -''' +RETURN = r""" # """ -EXAMPLES = ''' +EXAMPLES = r""" # Example of getting a password - name: get the Administrator password community.aws.ec2_win_password: @@ -92,7 +93,7 @@ EXAMPLES = ''' key_file: "~/aws-creds/my_test_key.pem" wait: true wait_timeout: 45 -''' +""" import datetime import time @@ -102,6 +103,7 @@ try: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 from cryptography.hazmat.primitives.serialization import load_pem_private_key + HAS_CRYPTOGRAPHY = True except ImportError: HAS_CRYPTOGRAPHY = False @@ -113,47 +115,48 @@ except ImportError: from ansible.module_utils._text import to_bytes -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def setup_module_object(): argument_spec = dict( instance_id=dict(required=True), - key_file=dict(required=False, default=None, type='path'), + key_file=dict(required=False, default=None, type="path"), key_passphrase=dict(no_log=True, default=None, required=False), key_data=dict(no_log=True, default=None, required=False), - wait=dict(type='bool', default=False, required=False), - wait_timeout=dict(default=120, required=False, type='int'), + wait=dict(type="bool", default=False, required=False), + wait_timeout=dict(default=120, required=False, type="int"), ) - mutually_exclusive = [['key_file', 'key_data']] + mutually_exclusive = [["key_file", "key_data"]] module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) return module def _get_password(module, client, instance_id): try: - data = client.get_password_data(aws_retry=True, InstanceId=instance_id)['PasswordData'] + data = client.get_password_data(aws_retry=True, InstanceId=instance_id)["PasswordData"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to get password data') + module.fail_json_aws(e, msg="Failed to get password data") return data def ec2_win_password(module): - instance_id = module.params.get('instance_id') - key_file = module.params.get('key_file') - if module.params.get('key_passphrase') is None: + instance_id = module.params.get("instance_id") + key_file = module.params.get("key_file") + if module.params.get("key_passphrase") is None: b_key_passphrase = None else: - b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict') - if module.params.get('key_data') is None: + b_key_passphrase = to_bytes(module.params.get("key_passphrase"), errors="surrogate_or_strict") + if module.params.get("key_data") is None: b_key_data = None else: - b_key_data = to_bytes(module.params.get('key_data'), errors='surrogate_or_strict') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') + b_key_data = to_bytes(module.params.get("key_data"), 
errors="surrogate_or_strict") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") - client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) if wait: start = datetime.datetime.now() @@ -171,15 +174,15 @@ def ec2_win_password(module): decoded = b64decode(data) if wait and datetime.datetime.now() >= end: - module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout) + module.fail_json(msg=f"wait for password timeout after {int(wait_timeout)} seconds") if key_file is not None and b_key_data is None: try: - with open(key_file, 'rb') as f: + with open(key_file, "rb") as f: key = load_pem_private_key(f.read(), b_key_passphrase, default_backend()) except IOError as e: # Handle bad files - module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror)) + module.fail_json(msg=f"I/O error ({int(e.errno)}) opening key file: {e.strerror}") except (ValueError, TypeError) as e: # Handle issues loading key module.fail_json(msg="unable to parse key file") @@ -195,7 +198,7 @@ def ec2_win_password(module): decrypted = None if decrypted is None: - module.fail_json(msg="unable to decrypt password", win_password='', changed=False) + module.fail_json(msg="unable to decrypt password", win_password="", changed=False) else: if wait: elapsed = datetime.datetime.now() - start @@ -208,10 +211,10 @@ def main(): module = setup_module_object() if not HAS_CRYPTOGRAPHY: - module.fail_json(msg='cryptography package required for this module.') + module.fail_json(msg="cryptography package required for this module.") ec2_win_password(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ecs_attribute.py b/ansible_collections/community/aws/plugins/modules/ecs_attribute.py index 6efe701d1..682014675 100644 --- a/ansible_collections/community/aws/plugins/modules/ecs_attribute.py +++ b/ansible_collections/community/aws/plugins/modules/ecs_attribute.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_attribute version_added: 1.0.0 short_description: manage ecs attributes description: - - Create, update or delete ECS container instance attributes. -author: Andrej Svenke (@anryko) + - Create, update or delete ECS container instance attributes. +author: + - Andrej Svenke (@anryko) options: cluster: description: @@ -54,13 +53,12 @@ options: required: true type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Set attributes @@ -82,9 +80,9 @@ EXAMPLES = r''' - flavor: test - migrated delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" attributes: description: attributes type: complex @@ -108,15 +106,16 @@ attributes: description: value of the attribute returned: if present type: str -''' +""" try: import botocore - from botocore.exceptions import ClientError, EndpointConnectionError + from botocore.exceptions import ClientError + from botocore.exceptions import EndpointConnectionError except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class EcsAttributes(object): @@ -136,29 +135,27 @@ class EcsAttributes(object): @staticmethod def _validate_attrs(attrs): - return all(tuple(attr.keys()) in (('name', 'value'), ('value', 'name')) for attr in attrs) + return all(tuple(attr.keys()) in (("name", "value"), ("value", "name")) for attr in attrs) def _parse_attrs(self, attrs): attrs_parsed = [] for attr in attrs: if isinstance(attr, dict): if len(attr) != 1: - self.module.fail_json(msg="Incorrect attribute format - %s" % str(attr)) + self.module.fail_json(msg=f"Incorrect attribute format - {str(attr)}") name, value = list(attr.items())[0] - attrs_parsed.append({'name': name, 'value': value}) + attrs_parsed.append({"name": name, "value": value}) elif isinstance(attr, str): - attrs_parsed.append({'name': attr, 'value': None}) + attrs_parsed.append({"name": attr, "value": None}) else: - self.module.fail_json(msg="Incorrect attributes format - %s" % str(attrs)) + self.module.fail_json(msg=f"Incorrect attributes format - {str(attrs)}") return attrs_parsed def _setup_attr_obj(self, ecs_arn, name, value=None, skip_value=False): - attr_obj = {'targetType': 'container-instance', - 'targetId': ecs_arn, - 'name': name} + attr_obj = {"targetType": "container-instance", "targetId": ecs_arn, "name": name} if not skip_value and value is not None: - attr_obj['value'] = value + attr_obj["value"] = value return attr_obj @@ -187,41 +184,43 @@ class Ec2EcsInstance(object): self.ec2_id = ec2_id try: - self.ecs = module.client('ecs') + self.ecs = module.client("ecs") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") self.ecs_arn = self._get_ecs_arn() def _get_ecs_arn(self): try: - ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)['containerInstanceArns'] - ec2_instances = self.ecs.describe_container_instances(cluster=self.cluster, - containerInstances=ecs_instances_arns)['containerInstances'] + ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)["containerInstanceArns"] + ec2_instances = self.ecs.describe_container_instances( + cluster=self.cluster, containerInstances=ecs_instances_arns + )["containerInstances"] except (ClientError, EndpointConnectionError) as e: - self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) + self.module.fail_json(msg=f"Can't connect to the cluster - {str(e)}") try: - ecs_arn = next(inst for inst in ec2_instances - if inst['ec2InstanceId'] == self.ec2_id)['containerInstanceArn'] + ecs_arn = next(inst for inst in ec2_instances if inst["ec2InstanceId"] == self.ec2_id)[ + "containerInstanceArn" + ] except StopIteration: - self.module.fail_json(msg="EC2 instance 
Id not found in ECS cluster - %s" % str(self.cluster)) + self.module.fail_json(msg=f"EC2 instance Id not found in ECS cluster - {str(self.cluster)}") return ecs_arn def attrs_put(self, attrs): """Puts attributes on ECS container instance""" try: - self.ecs.put_attributes(cluster=self.cluster, - attributes=attrs.get_for_ecs_arn(self.ecs_arn)) + self.ecs.put_attributes(cluster=self.cluster, attributes=attrs.get_for_ecs_arn(self.ecs_arn)) except ClientError as e: self.module.fail_json(msg=str(e)) def attrs_delete(self, attrs): """Deletes attributes from ECS container instance.""" try: - self.ecs.delete_attributes(cluster=self.cluster, - attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True)) + self.ecs.delete_attributes( + cluster=self.cluster, attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True) + ) except ClientError as e: self.module.fail_json(msg=str(e)) @@ -230,33 +229,33 @@ class Ec2EcsInstance(object): Returns EcsAttributes object containing attributes from ECS container instance with names matching to attrs.attributes (EcsAttributes Object). """ - attr_objs = [{'targetType': 'container-instance', 'attributeName': attr['name']} - for attr in attrs] + attr_objs = [{"targetType": "container-instance", "attributeName": attr["name"]} for attr in attrs] try: - matched_ecs_targets = [attr_found for attr_obj in attr_objs - for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)['attributes']] + matched_ecs_targets = [ + attr_found + for attr_obj in attr_objs + for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)["attributes"] + ] except ClientError as e: - self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) + self.module.fail_json(msg=f"Can't connect to the cluster - {str(e)}") - matched_objs = [target for target in matched_ecs_targets - if target['targetId'] == self.ecs_arn] + matched_objs = [target for target in matched_ecs_targets if target["targetId"] == self.ecs_arn] - results = [{'name': match['name'], 'value': match.get('value', None)} - for match in matched_objs] + results = [{"name": match["name"], "value": match.get("value", None)} for match in matched_objs] return EcsAttributes(self.module, results) def main(): argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), - cluster=dict(required=True, type='str'), - ec2_instance_id=dict(required=True, type='str'), - attributes=dict(required=True, type='list', elements='dict'), + state=dict(required=False, default="present", choices=["present", "absent"]), + cluster=dict(required=True, type="str"), + ec2_instance_id=dict(required=True, type="str"), + attributes=dict(required=True, type="list", elements="dict"), ) - required_together = [['cluster', 'ec2_instance_id', 'attributes']] + required_together = [["cluster", "ec2_instance_id", "attributes"]] module = AnsibleAWSModule( argument_spec=argument_spec, @@ -264,39 +263,43 @@ def main(): required_together=required_together, ) - cluster = module.params['cluster'] - ec2_instance_id = module.params['ec2_instance_id'] - attributes = module.params['attributes'] + cluster = module.params["cluster"] + ec2_instance_id = module.params["ec2_instance_id"] + attributes = module.params["attributes"] conti = Ec2EcsInstance(module, cluster, ec2_instance_id) attrs = EcsAttributes(module, attributes) - results = {'changed': False, - 'attributes': [ - {'cluster': cluster, - 'ec2_instance_id': ec2_instance_id, - 'attributes': attributes} - ]} + results = { + "changed": False, + 
"attributes": [ + { + "cluster": cluster, + "ec2_instance_id": ec2_instance_id, + "attributes": attributes, + } + ], + } attrs_present = conti.attrs_get_by_name(attrs) - if module.params['state'] == 'present': + if module.params["state"] == "present": attrs_diff = attrs.diff(attrs_present) if not attrs_diff: module.exit_json(**results) conti.attrs_put(attrs_diff) - results['changed'] = True + results["changed"] = True - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": if not attrs_present: module.exit_json(**results) conti.attrs_delete(attrs_present) - results['changed'] = True + results["changed"] = True module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ecs_cluster.py b/ansible_collections/community/aws/plugins/modules/ecs_cluster.py index 347e2173e..7d427a58d 100644 --- a/ansible_collections/community/aws/plugins/modules/ecs_cluster.py +++ b/ansible_collections/community/aws/plugins/modules/ecs_cluster.py @@ -1,22 +1,21 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ecs_cluster version_added: 1.0.0 short_description: Create or terminate ECS clusters. notes: - - When deleting a cluster, the information returned is the state of the cluster prior to deletion. - - It will also wait for a cluster to have instances registered to it. + - When deleting a cluster, the information returned is the state of the cluster prior to deletion. + - It will also wait for a cluster to have instances registered to it. description: - - Creates or terminates ecs clusters. -author: Mark Chance (@Java1Guy) + - Creates or terminates ecs clusters. +author: + - Mark Chance (@Java1Guy) options: state: description: @@ -78,13 +77,12 @@ options: type: bool default: false extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Cluster creation @@ -105,7 +103,7 @@ EXAMPLES = ''' weight: 1 - capacity_provider: FARGATE_SPOT weight: 100 - purge_capacity_providers: True + purge_capacity_providers: true - name: Cluster deletion community.aws.ecs_cluster: @@ -119,9 +117,9 @@ EXAMPLES = ''' delay: 10 repeat: 10 register: task_output +""" -''' -RETURN = ''' +RETURN = r""" activeServicesCount: description: how many services are active in this cluster returned: 0 if a new cluster @@ -163,7 +161,7 @@ status: returned: always type: str sample: ACTIVE -''' +""" import time @@ -172,9 +170,10 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class EcsClusterManager: @@ -183,76 +182,75 @@ class EcsClusterManager: def __init__(self, module): self.module = module try: - self.ecs = module.client('ecs') + self.ecs = module.client("ecs") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'): + def find_in_array(self, array_of_clusters, cluster_name, field_name="clusterArn"): for c in array_of_clusters: if c[field_name].endswith(cluster_name): return c return None def describe_cluster(self, cluster_name): - response = self.ecs.describe_clusters(clusters=[ - cluster_name - ]) - if len(response['failures']) > 0: - c = self.find_in_array(response['failures'], cluster_name, 'arn') - if c and c['reason'] == 'MISSING': + response = self.ecs.describe_clusters(clusters=[cluster_name]) + if len(response["failures"]) > 0: + c = self.find_in_array(response["failures"], cluster_name, "arn") + if c and c["reason"] == "MISSING": return None # fall thru and look through found ones - if len(response['clusters']) > 0: - c = self.find_in_array(response['clusters'], cluster_name) + if len(response["clusters"]) > 0: + c = self.find_in_array(response["clusters"], cluster_name) if c: return c - raise Exception("Unknown problem describing cluster %s." 
% cluster_name) + raise Exception(f"Unknown problem describing cluster {cluster_name}.") def create_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): params = dict(clusterName=cluster_name) if capacity_providers: - params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + params["capacityProviders"] = snake_dict_to_camel_dict(capacity_providers) if capacity_provider_strategy: - params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + params["defaultCapacityProviderStrategy"] = snake_dict_to_camel_dict(capacity_provider_strategy) response = self.ecs.create_cluster(**params) - return response['cluster'] + return response["cluster"] def update_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): params = dict(cluster=cluster_name) if capacity_providers: - params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + params["capacityProviders"] = snake_dict_to_camel_dict(capacity_providers) else: - params['capacityProviders'] = [] + params["capacityProviders"] = [] if capacity_provider_strategy: - params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + params["defaultCapacityProviderStrategy"] = snake_dict_to_camel_dict(capacity_provider_strategy) else: - params['defaultCapacityProviderStrategy'] = [] + params["defaultCapacityProviderStrategy"] = [] response = self.ecs.put_cluster_capacity_providers(**params) - return response['cluster'] + return response["cluster"] def delete_cluster(self, clusterName): return self.ecs.delete_cluster(cluster=clusterName) def main(): - argument_spec = dict( - state=dict(required=True, choices=['present', 'absent', 'has_instances']), - name=dict(required=True, type='str'), - delay=dict(required=False, type='int', default=10), - repeat=dict(required=False, type='int', default=10), - purge_capacity_providers=dict(required=False, type='bool', default=False), - capacity_providers=dict(required=False, type='list', elements='str'), - capacity_provider_strategy=dict(required=False, - type='list', - elements='dict', - options=dict(capacity_provider=dict(type='str'), - weight=dict(type='int'), - base=dict(type='int', default=0) - ) - ), + state=dict(required=True, choices=["present", "absent", "has_instances"]), + name=dict(required=True, type="str"), + delay=dict(required=False, type="int", default=10), + repeat=dict(required=False, type="int", default=10), + purge_capacity_providers=dict(required=False, type="bool", default=False), + capacity_providers=dict(required=False, type="list", elements="str"), + capacity_provider_strategy=dict( + required=False, + type="list", + elements="dict", + options=dict( + capacity_provider=dict(type="str"), + weight=dict(type="int"), + base=dict(type="int", default=0), + ), + ), ) - required_together = [['state', 'name']] + required_together = [["state", "name"]] module = AnsibleAWSModule( argument_spec=argument_spec, @@ -262,19 +260,19 @@ def main(): cluster_mgr = EcsClusterManager(module) try: - existing = cluster_mgr.describe_cluster(module.params['name']) + existing = cluster_mgr.describe_cluster(module.params["name"]) except Exception as e: - module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e)) + module.fail_json(msg="Exception describing cluster '" + module.params["name"] + "': " + str(e)) results = dict(changed=False) - if module.params['state'] == 'present': + if module.params["state"] == "present": # Pull requested 
and existing capacity providers and strategies. - purge_capacity_providers = module.params['purge_capacity_providers'] - requested_cp = module.params['capacity_providers'] - requested_cps = module.params['capacity_provider_strategy'] - if existing and 'status' in existing and existing['status'] == "ACTIVE": - existing_cp = existing['capacityProviders'] - existing_cps = existing['defaultCapacityProviderStrategy'] + purge_capacity_providers = module.params["purge_capacity_providers"] + requested_cp = module.params["capacity_providers"] + requested_cps = module.params["capacity_provider_strategy"] + if existing and "status" in existing and existing["status"] == "ACTIVE": + existing_cp = existing["capacityProviders"] + existing_cps = existing["defaultCapacityProviderStrategy"] if requested_cp is None: requested_cp = [] @@ -293,9 +291,12 @@ def main(): # Unless purge_capacity_providers is true, we will not be updating the providers or strategy. if not purge_capacity_providers: - module.deprecate('After 2024-06-01 the default value of purge_capacity_providers will change from false to true.' - ' To maintain the existing behaviour explicitly set purge_capacity_providers=true', - date='2024-06-01', collection_name='community.aws') + module.deprecate( + "After 2024-06-01 the default value of purge_capacity_providers will change from false to true." + " To maintain the existing behaviour explicitly set purge_capacity_providers=true", + date="2024-06-01", + collection_name="community.aws", + ) cps_update_needed = False requested_cp = existing_cp requested_cps = existing_cps @@ -303,57 +304,67 @@ def main(): # If either the providers or strategy differ, update the cluster. if requested_cp != existing_cp or cps_update_needed: if not module.check_mode: - results['cluster'] = cluster_mgr.update_cluster(cluster_name=module.params['name'], - capacity_providers=requested_cp, - capacity_provider_strategy=requested_cps) - results['changed'] = True + results["cluster"] = cluster_mgr.update_cluster( + cluster_name=module.params["name"], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps, + ) + results["changed"] = True else: - results['cluster'] = existing + results["cluster"] = existing else: if not module.check_mode: # doesn't exist. create it. - results['cluster'] = cluster_mgr.create_cluster(cluster_name=module.params['name'], - capacity_providers=requested_cp, - capacity_provider_strategy=requested_cps) - results['changed'] = True + results["cluster"] = cluster_mgr.create_cluster( + cluster_name=module.params["name"], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps, + ) + results["changed"] = True # delete the cluster - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": if not existing: pass else: # it exists, so we should delete it and mark changed. 
# return info about the cluster deleted - results['cluster'] = existing - if 'status' in existing and existing['status'] == "INACTIVE": - results['changed'] = False + results["cluster"] = existing + if "status" in existing and existing["status"] == "INACTIVE": + results["changed"] = False else: if not module.check_mode: - cluster_mgr.delete_cluster(module.params['name']) - results['changed'] = True - elif module.params['state'] == 'has_instances': + cluster_mgr.delete_cluster(module.params["name"]) + results["changed"] = True + elif module.params["state"] == "has_instances": if not existing: - module.fail_json(msg="Cluster '" + module.params['name'] + " not found.") + module.fail_json(msg="Cluster '" + module.params["name"] + " not found.") return # it exists, so we should delete it and mark changed. # return info about the cluster deleted - delay = module.params['delay'] - repeat = module.params['repeat'] + delay = module.params["delay"] + repeat = module.params["repeat"] time.sleep(delay) count = 0 for i in range(repeat): - existing = cluster_mgr.describe_cluster(module.params['name']) - count = existing['registeredContainerInstancesCount'] + existing = cluster_mgr.describe_cluster(module.params["name"]) + count = existing["registeredContainerInstancesCount"] if count > 0: - results['changed'] = True + results["changed"] = True break time.sleep(delay) if count == 0 and i is repeat - 1: - module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.") + module.fail_json( + msg="Cluster instance count still zero after " + + str(repeat) + + " tries of " + + str(delay) + + " seconds each." + ) return module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ecs_ecr.py b/ansible_collections/community/aws/plugins/modules/ecs_ecr.py index d83d5af2e..545b82742 100644 --- a/ansible_collections/community/aws/plugins/modules/ecs_ecr.py +++ b/ansible_collections/community/aws/plugins/modules/ecs_ecr.py @@ -1,15 +1,10 @@ #!/usr/bin/python -# -*- coding: utf-8 -* +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ecs_ecr version_added: 1.0.0 @@ -104,15 +99,14 @@ options: type: dict version_added: 5.2.0 author: - - David M. Lee (@leedm777) + - David M. Lee (@leedm777) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # If the repository does not exist, it is created. If it does exist, would not # affect any policies already on it. 
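# A hypothetical variant (repository name is a placeholder) enabling the
# module's scan_on_push option so that images are scanned as they are pushed.
- name: ecr-repo-with-image-scanning
  community.aws.ecs_ecr:
    name: super/cool/project
    scan_on_push: true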
- name: ecr-repo @@ -186,9 +180,9 @@ EXAMPLES = ''' encryption_configuration: encryption_type: KMS kms_key: custom-kms-key-alias -''' +""" -RETURN = ''' +RETURN = r""" state: type: str description: The asserted state of the repository (present, absent) @@ -216,7 +210,7 @@ repository: repositoryArn: arn:aws:ecr:us-east-1:123456789012:repository/ecr-test-1484664090 repositoryName: ecr-test-1484664090 repositoryUri: 123456789012.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090 -''' +""" import json import traceback @@ -229,11 +223,11 @@ except ImportError: from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import sort_json_policy_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto_exception +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def build_kwargs(registry_id): @@ -251,45 +245,45 @@ def build_kwargs(registry_id): class EcsEcr: def __init__(self, module): - self.ecr = module.client('ecr') - self.sts = module.client('sts') + self.ecr = module.client("ecr") + self.sts = module.client("sts") self.check_mode = module.check_mode self.changed = False self.skipped = False def get_repository(self, registry_id, name): try: - res = self.ecr.describe_repositories( - repositoryNames=[name], **build_kwargs(registry_id)) - repos = res.get('repositories') + res = self.ecr.describe_repositories(repositoryNames=[name], **build_kwargs(registry_id)) + repos = res.get("repositories") return repos and repos[0] - except is_boto3_error_code('RepositoryNotFoundException'): + except is_boto3_error_code("RepositoryNotFoundException"): return None def get_repository_policy(self, registry_id, name): try: - res = self.ecr.get_repository_policy( - repositoryName=name, **build_kwargs(registry_id)) - text = res.get('policyText') + res = self.ecr.get_repository_policy(repositoryName=name, **build_kwargs(registry_id)) + text = res.get("policyText") return text and json.loads(text) - except is_boto3_error_code(['RepositoryNotFoundException', 'RepositoryPolicyNotFoundException']): + except is_boto3_error_code(["RepositoryNotFoundException", "RepositoryPolicyNotFoundException"]): return None def create_repository(self, registry_id, name, image_tag_mutability, encryption_configuration): if registry_id: - default_registry_id = self.sts.get_caller_identity().get('Account') + default_registry_id = self.sts.get_caller_identity().get("Account") if registry_id != default_registry_id: - raise Exception('Cannot create repository in registry {0}.' - 'Would be created in {1} instead.'.format(registry_id, default_registry_id)) + raise Exception( + f"Cannot create repository in registry {registry_id}. Would be created in {default_registry_id} instead." 
+ ) if encryption_configuration is None: - encryption_configuration = dict(encryptionType='AES256') + encryption_configuration = dict(encryptionType="AES256") if not self.check_mode: repo = self.ecr.create_repository( repositoryName=name, imageTagMutability=image_tag_mutability, - encryptionConfiguration=encryption_configuration).get('repository') + encryptionConfiguration=encryption_configuration, + ).get("repository") self.changed = True return repo else: @@ -299,10 +293,8 @@ class EcsEcr: def set_repository_policy(self, registry_id, name, policy_text, force): if not self.check_mode: policy = self.ecr.set_repository_policy( - repositoryName=name, - policyText=policy_text, - force=force, - **build_kwargs(registry_id)) + repositoryName=name, policyText=policy_text, force=force, **build_kwargs(registry_id) + ) self.changed = True return policy else: @@ -310,15 +302,13 @@ class EcsEcr: if self.get_repository(registry_id, name) is None: printable = name if registry_id: - printable = '{0}:{1}'.format(registry_id, name) - raise Exception( - 'could not find repository {0}'.format(printable)) + printable = f"{registry_id}:{name}" + raise Exception(f"could not find repository {printable}") return def delete_repository(self, registry_id, name, force): if not self.check_mode: - repo = self.ecr.delete_repository( - repositoryName=name, force=force, **build_kwargs(registry_id)) + repo = self.ecr.delete_repository(repositoryName=name, force=force, **build_kwargs(registry_id)) self.changed = True return repo else: @@ -330,8 +320,7 @@ class EcsEcr: def delete_repository_policy(self, registry_id, name): if not self.check_mode: - policy = self.ecr.delete_repository_policy( - repositoryName=name, **build_kwargs(registry_id)) + policy = self.ecr.delete_repository_policy(repositoryName=name, **build_kwargs(registry_id)) self.changed = True return policy else: @@ -343,36 +332,33 @@ class EcsEcr: def put_image_tag_mutability(self, registry_id, name, new_mutability_configuration): repo = self.get_repository(registry_id, name) - current_mutability_configuration = repo.get('imageTagMutability') + current_mutability_configuration = repo.get("imageTagMutability") if current_mutability_configuration != new_mutability_configuration: if not self.check_mode: self.ecr.put_image_tag_mutability( - repositoryName=name, - imageTagMutability=new_mutability_configuration, - **build_kwargs(registry_id)) + repositoryName=name, imageTagMutability=new_mutability_configuration, **build_kwargs(registry_id) + ) else: self.skipped = True self.changed = True - repo['imageTagMutability'] = new_mutability_configuration + repo["imageTagMutability"] = new_mutability_configuration return repo def get_lifecycle_policy(self, registry_id, name): try: - res = self.ecr.get_lifecycle_policy( - repositoryName=name, **build_kwargs(registry_id)) - text = res.get('lifecyclePolicyText') + res = self.ecr.get_lifecycle_policy(repositoryName=name, **build_kwargs(registry_id)) + text = res.get("lifecyclePolicyText") return text and json.loads(text) - except is_boto3_error_code(['LifecyclePolicyNotFoundException', 'RepositoryNotFoundException']): + except is_boto3_error_code(["LifecyclePolicyNotFoundException", "RepositoryNotFoundException"]): return None def put_lifecycle_policy(self, registry_id, name, policy_text): if not self.check_mode: policy = self.ecr.put_lifecycle_policy( - repositoryName=name, - lifecyclePolicyText=policy_text, - **build_kwargs(registry_id)) + repositoryName=name, lifecyclePolicyText=policy_text, **build_kwargs(registry_id) + ) 
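            # (reviewer note, not part of the upstream diff) The reformatted
            # call is behaviour-identical: boto3's ECR put_lifecycle_policy
            # accepts repositoryName and lifecyclePolicyText plus an optional
            # registryId, which build_kwargs() injects only when the user
            # supplied registry_id.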
self.changed = True return policy else: @@ -380,15 +366,13 @@ class EcsEcr: if self.get_repository(registry_id, name) is None: printable = name if registry_id: - printable = '{0}:{1}'.format(registry_id, name) - raise Exception( - 'could not find repository {0}'.format(printable)) + printable = f"{registry_id}:{name}" + raise Exception(f"could not find repository {printable}") return def purge_lifecycle_policy(self, registry_id, name): if not self.check_mode: - policy = self.ecr.delete_lifecycle_policy( - repositoryName=name, **build_kwargs(registry_id)) + policy = self.ecr.delete_lifecycle_policy(repositoryName=name, **build_kwargs(registry_id)) self.changed = True return policy else: @@ -402,14 +386,11 @@ class EcsEcr: if not self.check_mode: if registry_id: scan = self.ecr.put_image_scanning_configuration( - registryId=registry_id, - repositoryName=name, - imageScanningConfiguration={'scanOnPush': scan_on_push} + registryId=registry_id, repositoryName=name, imageScanningConfiguration={"scanOnPush": scan_on_push} ) else: scan = self.ecr.put_image_scanning_configuration( - repositoryName=name, - imageScanningConfiguration={'scanOnPush': scan_on_push} + repositoryName=name, imageScanningConfiguration={"scanOnPush": scan_on_push} ) self.changed = True return scan @@ -419,11 +400,11 @@ class EcsEcr: def sort_lists_of_strings(policy): - for statement_index in range(0, len(policy.get('Statement', []))): - for key in policy['Statement'][statement_index]: - value = policy['Statement'][statement_index][key] + for statement_index in range(0, len(policy.get("Statement", []))): + for key in policy["Statement"][statement_index]: + value = policy["Statement"][statement_index][key] if isinstance(value, list) and all(isinstance(item, string_types) for item in value): - policy['Statement'][statement_index][key] = sorted(value) + policy["Statement"][statement_index][key] = sorted(value) return policy @@ -431,151 +412,138 @@ def run(ecr, params): # type: (EcsEcr, dict, int) -> Tuple[bool, dict] result = {} try: - name = params['name'] - state = params['state'] - policy_text = params['policy'] - purge_policy = params['purge_policy'] - force_absent = params['force_absent'] - registry_id = params['registry_id'] - force_set_policy = params['force_set_policy'] - image_tag_mutability = params['image_tag_mutability'].upper() - lifecycle_policy_text = params['lifecycle_policy'] - purge_lifecycle_policy = params['purge_lifecycle_policy'] - scan_on_push = params['scan_on_push'] - encryption_configuration = snake_dict_to_camel_dict(params['encryption_configuration']) + name = params["name"] + state = params["state"] + policy_text = params["policy"] + purge_policy = params["purge_policy"] + force_absent = params["force_absent"] + registry_id = params["registry_id"] + force_set_policy = params["force_set_policy"] + image_tag_mutability = params["image_tag_mutability"].upper() + lifecycle_policy_text = params["lifecycle_policy"] + purge_lifecycle_policy = params["purge_lifecycle_policy"] + scan_on_push = params["scan_on_push"] + encryption_configuration = snake_dict_to_camel_dict(params["encryption_configuration"]) # Parse policies, if they are given try: policy = policy_text and json.loads(policy_text) except ValueError: - result['policy'] = policy_text - result['msg'] = 'Could not parse policy' + result["policy"] = policy_text + result["msg"] = "Could not parse policy" return False, result try: - lifecycle_policy = \ - lifecycle_policy_text and json.loads(lifecycle_policy_text) + lifecycle_policy = 
lifecycle_policy_text and json.loads(lifecycle_policy_text) except ValueError: - result['lifecycle_policy'] = lifecycle_policy_text - result['msg'] = 'Could not parse lifecycle_policy' + result["lifecycle_policy"] = lifecycle_policy_text + result["msg"] = "Could not parse lifecycle_policy" return False, result - result['state'] = state - result['created'] = False + result["state"] = state + result["created"] = False repo = ecr.get_repository(registry_id, name) - if state == 'present': - result['created'] = False + if state == "present": + result["created"] = False if not repo: - repo = ecr.create_repository( - registry_id, name, image_tag_mutability, encryption_configuration) - result['changed'] = True - result['created'] = True + repo = ecr.create_repository(registry_id, name, image_tag_mutability, encryption_configuration) + result["changed"] = True + result["created"] = True else: if encryption_configuration is not None: - if repo.get('encryptionConfiguration') != encryption_configuration: - result['msg'] = 'Cannot modify repository encryption type' + if repo.get("encryptionConfiguration") != encryption_configuration: + result["msg"] = "Cannot modify repository encryption type" return False, result repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability) - result['repository'] = repo + result["repository"] = repo if purge_lifecycle_policy: - original_lifecycle_policy = \ - ecr.get_lifecycle_policy(registry_id, name) + original_lifecycle_policy = ecr.get_lifecycle_policy(registry_id, name) - result['lifecycle_policy'] = None + result["lifecycle_policy"] = None if original_lifecycle_policy: ecr.purge_lifecycle_policy(registry_id, name) - result['changed'] = True + result["changed"] = True elif lifecycle_policy_text is not None: try: - lifecycle_policy = sort_json_policy_dict(lifecycle_policy) - result['lifecycle_policy'] = lifecycle_policy + result["lifecycle_policy"] = lifecycle_policy + original_lifecycle_policy = ecr.get_lifecycle_policy(registry_id, name) - original_lifecycle_policy = ecr.get_lifecycle_policy( - registry_id, name) - - if original_lifecycle_policy: - original_lifecycle_policy = sort_json_policy_dict( - original_lifecycle_policy) - - if original_lifecycle_policy != lifecycle_policy: - ecr.put_lifecycle_policy(registry_id, name, - lifecycle_policy_text) - result['changed'] = True + if compare_policies(original_lifecycle_policy, lifecycle_policy): + ecr.put_lifecycle_policy(registry_id, name, lifecycle_policy_text) + result["changed"] = True except Exception: # Some failure w/ the policy. It's helpful to know what the # policy is. 
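                # (reviewer note, not part of the upstream diff) The hunk above
                # also swaps the old sort_json_policy_dict comparison for
                # amazon.aws's compare_policies(), which returns True when the
                # two policies differ, so the lifecycle policy is still only
                # rewritten on an actual change.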
- result['lifecycle_policy'] = lifecycle_policy_text + result["lifecycle_policy"] = lifecycle_policy_text raise if purge_policy: original_policy = ecr.get_repository_policy(registry_id, name) - result['policy'] = None + result["policy"] = None if original_policy: ecr.delete_repository_policy(registry_id, name) - result['changed'] = True + result["changed"] = True elif policy_text is not None: try: # Sort any lists containing only string types policy = sort_lists_of_strings(policy) - result['policy'] = policy + result["policy"] = policy - original_policy = ecr.get_repository_policy( - registry_id, name) + original_policy = ecr.get_repository_policy(registry_id, name) if original_policy: original_policy = sort_lists_of_strings(original_policy) if compare_policies(original_policy, policy): - ecr.set_repository_policy( - registry_id, name, policy_text, force_set_policy) - result['changed'] = True + ecr.set_repository_policy(registry_id, name, policy_text, force_set_policy) + result["changed"] = True except Exception: # Some failure w/ the policy. It's helpful to know what the # policy is. - result['policy'] = policy_text + result["policy"] = policy_text raise else: original_policy = ecr.get_repository_policy(registry_id, name) if original_policy: - result['policy'] = original_policy + result["policy"] = original_policy original_scan_on_push = ecr.get_repository(registry_id, name) if original_scan_on_push is not None: - if scan_on_push != original_scan_on_push['imageScanningConfiguration']['scanOnPush']: - result['changed'] = True - result['repository']['imageScanningConfiguration']['scanOnPush'] = scan_on_push + if scan_on_push != original_scan_on_push["imageScanningConfiguration"]["scanOnPush"]: + result["changed"] = True + result["repository"]["imageScanningConfiguration"]["scanOnPush"] = scan_on_push response = ecr.put_image_scanning_configuration(registry_id, name, scan_on_push) - elif state == 'absent': - result['name'] = name + elif state == "absent": + result["name"] = name if repo: ecr.delete_repository(registry_id, name, force_absent) - result['changed'] = True + result["changed"] = True except Exception as err: msg = str(err) if isinstance(err, botocore.exceptions.ClientError): msg = boto_exception(err) - result['msg'] = msg - result['exception'] = traceback.format_exc() + result["msg"] = msg + result["exception"] = traceback.format_exc() return False, result if ecr.skipped: - result['skipped'] = True + result["skipped"] = True if ecr.changed: - result['changed'] = True + result["changed"] = True return True, result @@ -584,34 +552,37 @@ def main(): argument_spec = dict( name=dict(required=True), registry_id=dict(required=False), - state=dict(required=False, choices=['present', 'absent'], - default='present'), - force_absent=dict(required=False, type='bool', default=False), - force_set_policy=dict(required=False, type='bool', default=False), - policy=dict(required=False, type='json'), - image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'], - default='mutable'), - purge_policy=dict(required=False, type='bool'), - lifecycle_policy=dict(required=False, type='json'), - purge_lifecycle_policy=dict(required=False, type='bool'), - scan_on_push=(dict(required=False, type='bool', default=False)), + state=dict(required=False, choices=["present", "absent"], default="present"), + force_absent=dict(required=False, type="bool", default=False), + force_set_policy=dict(required=False, type="bool", default=False), + policy=dict(required=False, type="json"), + 
image_tag_mutability=dict(required=False, choices=["mutable", "immutable"], default="mutable"), + purge_policy=dict(required=False, type="bool"), + lifecycle_policy=dict(required=False, type="json"), + purge_lifecycle_policy=dict(required=False, type="bool"), + scan_on_push=(dict(required=False, type="bool", default=False)), encryption_configuration=dict( required=False, - type='dict', + type="dict", options=dict( - encryption_type=dict(required=False, type='str', default='AES256', choices=['AES256', 'KMS']), - kms_key=dict(required=False, type='str', no_log=False), + encryption_type=dict(required=False, type="str", default="AES256", choices=["AES256", "KMS"]), + kms_key=dict(required=False, type="str", no_log=False), ), required_if=[ - ['encryption_type', 'KMS', ['kms_key']], + ["encryption_type", "KMS", ["kms_key"]], ], ), ) mutually_exclusive = [ - ['policy', 'purge_policy'], - ['lifecycle_policy', 'purge_lifecycle_policy']] - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive) + ["policy", "purge_policy"], + ["lifecycle_policy", "purge_lifecycle_policy"], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + ) ecr = EcsEcr(module) passed, result = run(ecr, module.params) @@ -622,5 +593,5 @@ def main(): module.fail_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ecs_service.py b/ansible_collections/community/aws/plugins/modules/ecs_service.py index 2d86a6bd5..e832fa3b5 100644 --- a/ansible_collections/community/aws/plugins/modules/ecs_service.py +++ b/ansible_collections/community/aws/plugins/modules/ecs_service.py @@ -1,11 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# -*- coding: utf-8 -*- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_service version_added: 1.0.0 @@ -297,12 +296,12 @@ options: required: false version_added: 4.1.0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Basic provisioning example - community.aws.ecs_service: @@ -321,10 +320,10 @@ EXAMPLES = r''' desired_count: 0 network_configuration: subnets: - - subnet-abcd1234 + - subnet-abcd1234 security_groups: - - sg-aaaa1111 - - my_security_group + - sg-aaaa1111 + - my_security_group # Simple example to delete - community.aws.ecs_service: @@ -358,8 +357,8 @@ EXAMPLES = r''' desired_count: 3 deployment_configuration: deployment_circuit_breaker: - enable: True - rollback: True + enable: true + rollback: true # With capacity_provider_strategy (added in version 4.0) - community.aws.ecs_service: @@ -384,9 +383,9 @@ EXAMPLES = r''' Firstname: jane lastName: doe propagate_tags: SERVICE -''' +""" -RETURN = r''' +RETURN = r""" service: description: Details of created service. 
returned: when creating a service @@ -678,31 +677,33 @@ ansible_facts: returned: always type: str -''' -import time +""" -DEPLOYMENT_CONTROLLER_TYPE_MAP = { - 'type': 'str', -} +import time -DEPLOYMENT_CONFIGURATION_TYPE_MAP = { - 'maximum_percent': 'int', - 'minimum_healthy_percent': 'int', - 'deployment_circuit_breaker': 'dict', -} +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import map_complex_type from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import map_complex_type -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + +DEPLOYMENT_CONTROLLER_TYPE_MAP = { + "type": "str", +} + +DEPLOYMENT_CONFIGURATION_TYPE_MAP = { + "maximum_percent": "int", + "minimum_healthy_percent": "int", + "deployment_circuit_breaker": "dict", +} class EcsServiceManager: @@ -710,32 +711,32 @@ class EcsServiceManager: def __init__(self, module): self.module = module - self.ecs = module.client('ecs') - self.ec2 = module.client('ec2') + self.ecs = module.client("ecs") + self.ec2 = module.client("ec2") def format_network_configuration(self, network_config): result = dict() - if network_config['subnets'] is not None: - result['subnets'] = network_config['subnets'] + if network_config["subnets"] is not None: + result["subnets"] = network_config["subnets"] else: self.module.fail_json(msg="Network configuration must include subnets") - if network_config['security_groups'] is not None: - groups = network_config['security_groups'] - if any(not sg.startswith('sg-') for sg in groups): + if network_config["security_groups"] is not None: + groups = network_config["security_groups"] + if any(not sg.startswith("sg-") for sg in groups): try: - vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId'] + vpc_id = self.ec2.describe_subnets(SubnetIds=[result["subnets"][0]])["Subnets"][0]["VpcId"] groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't look up security groups") - result['securityGroups'] = groups - if network_config['assign_public_ip'] is not None: - if network_config['assign_public_ip'] is True: - result['assignPublicIp'] = "ENABLED" + result["securityGroups"] = groups + if network_config["assign_public_ip"] is not None: + if network_config["assign_public_ip"] is True: + result["assignPublicIp"] = "ENABLED" else: - result['assignPublicIp'] = "DISABLED" + result["assignPublicIp"] = "DISABLED" return dict(awsvpcConfiguration=result) - def find_in_array(self, array_of_services, service_name, 
field_name='serviceArn'): + def find_in_array(self, array_of_services, service_name, field_name="serviceArn"): for c in array_of_services: if c[field_name].endswith(service_name): return c @@ -745,42 +746,42 @@ class EcsServiceManager: response = self.ecs.describe_services( cluster=cluster_name, services=[service_name], - include=['TAGS'], + include=["TAGS"], ) - msg = '' + msg = "" - if len(response['failures']) > 0: - c = self.find_in_array(response['failures'], service_name, 'arn') - msg += ", failure reason is " + c['reason'] - if c and c['reason'] == 'MISSING': + if len(response["failures"]) > 0: + c = self.find_in_array(response["failures"], service_name, "arn") + msg += ", failure reason is " + c["reason"] + if c and c["reason"] == "MISSING": return None # fall thru and look through found ones - if len(response['services']) > 0: - c = self.find_in_array(response['services'], service_name) + if len(response["services"]) > 0: + c = self.find_in_array(response["services"], service_name) if c: return c - raise Exception("Unknown problem describing service %s." % service_name) + raise Exception(f"Unknown problem describing service {service_name}.") def is_matching_service(self, expected, existing): # aws returns the arn of the task definition # arn:aws:ecs:eu-central-1:123456789:task-definition/ansible-fargate-nginx:3 # but the user is just entering # ansible-fargate-nginx:3 - if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]: - if existing.get('deploymentController', {}).get('type', None) != 'CODE_DEPLOY': + if expected["task_definition"] != existing["taskDefinition"].split("/")[-1]: + if existing.get("deploymentController", {}).get("type", None) != "CODE_DEPLOY": return False - if expected.get('health_check_grace_period_seconds'): - if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'): + if expected.get("health_check_grace_period_seconds"): + if expected.get("health_check_grace_period_seconds") != existing.get("healthCheckGracePeriodSeconds"): return False - if (expected['load_balancers'] or []) != existing['loadBalancers']: + if (expected["load_balancers"] or []) != existing["loadBalancers"]: return False - if (expected['propagate_tags'] or "NONE") != existing['propagateTags']: + if (expected["propagate_tags"] or "NONE") != existing["propagateTags"]: return False - if boto3_tag_list_to_ansible_dict(existing.get('tags', [])) != (expected['tags'] or {}): + if boto3_tag_list_to_ansible_dict(existing.get("tags", [])) != (expected["tags"] or {}): return False if (expected["enable_execute_command"] or False) != existing.get("enableExecuteCommand", False): @@ -788,8 +789,8 @@ class EcsServiceManager: # expected is params. 
DAEMON scheduling strategy returns desired count equal to # number of instances running; don't check desired count if scheduling strat is daemon - if (expected['scheduling_strategy'] != 'DAEMON'): - if (expected['desired_count'] or 0) != existing['desiredCount']: + if expected["scheduling_strategy"] != "DAEMON": + if (expected["desired_count"] or 0) != existing["desiredCount"]: return False return True @@ -818,7 +819,6 @@ class EcsServiceManager: propagate_tags, enable_execute_command, ): - params = dict( cluster=cluster_name, serviceName=service_name, @@ -827,47 +827,49 @@ class EcsServiceManager: clientToken=client_token, role=role, deploymentConfiguration=deployment_configuration, - placementStrategy=placement_strategy + placementStrategy=placement_strategy, ) if network_configuration: - params['networkConfiguration'] = network_configuration + params["networkConfiguration"] = network_configuration if deployment_controller: - params['deploymentController'] = deployment_controller + params["deploymentController"] = deployment_controller if launch_type: - params['launchType'] = launch_type + params["launchType"] = launch_type if platform_version: - params['platformVersion'] = platform_version + params["platformVersion"] = platform_version if self.health_check_setable(params) and health_check_grace_period_seconds is not None: - params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds + params["healthCheckGracePeriodSeconds"] = health_check_grace_period_seconds if service_registries: - params['serviceRegistries'] = service_registries + params["serviceRegistries"] = service_registries # filter placement_constraint and left only those where value is not None # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation if placement_constraints: - params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None} - for constraint in placement_constraints] + params["placementConstraints"] = [ + {key: value for key, value in constraint.items() if value is not None} + for constraint in placement_constraints + ] # desired count is not required if scheduling strategy is daemon if desired_count is not None: - params['desiredCount'] = desired_count + params["desiredCount"] = desired_count if capacity_provider_strategy: - params['capacityProviderStrategy'] = capacity_provider_strategy + params["capacityProviderStrategy"] = capacity_provider_strategy if propagate_tags: - params['propagateTags'] = propagate_tags + params["propagateTags"] = propagate_tags # desired count is not required if scheduling strategy is daemon if desired_count is not None: - params['desiredCount'] = desired_count + params["desiredCount"] = desired_count if tags: - params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + params["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value") if scheduling_strategy: - params['schedulingStrategy'] = scheduling_strategy + params["schedulingStrategy"] = scheduling_strategy if enable_execute_command: params["enableExecuteCommand"] = enable_execute_command response = self.ecs.create_service(**params) - return self.jsonize(response['service']) + return self.jsonize(response["service"]) def update_service( self, @@ -891,242 +893,262 @@ class EcsServiceManager: cluster=cluster_name, service=service_name, taskDefinition=task_definition, - deploymentConfiguration=deployment_configuration) + deploymentConfiguration=deployment_configuration, + ) # filter placement_constraint 
and left only those where value is not None # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation if placement_constraints: - params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None} - for constraint in placement_constraints] + params["placementConstraints"] = [ + {key: value for key, value in constraint.items() if value is not None} + for constraint in placement_constraints + ] if purge_placement_constraints and not placement_constraints: - params['placementConstraints'] = [] + params["placementConstraints"] = [] if placement_strategy: - params['placementStrategy'] = placement_strategy + params["placementStrategy"] = placement_strategy if purge_placement_strategy and not placement_strategy: - params['placementStrategy'] = [] + params["placementStrategy"] = [] if network_configuration: - params['networkConfiguration'] = network_configuration + params["networkConfiguration"] = network_configuration if force_new_deployment: - params['forceNewDeployment'] = force_new_deployment + params["forceNewDeployment"] = force_new_deployment if capacity_provider_strategy: - params['capacityProviderStrategy'] = capacity_provider_strategy + params["capacityProviderStrategy"] = capacity_provider_strategy if health_check_grace_period_seconds is not None: - params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds + params["healthCheckGracePeriodSeconds"] = health_check_grace_period_seconds # desired count is not required if scheduling strategy is daemon if desired_count is not None: - params['desiredCount'] = desired_count + params["desiredCount"] = desired_count if enable_execute_command is not None: params["enableExecuteCommand"] = enable_execute_command if load_balancers: - params['loadBalancers'] = load_balancers + params["loadBalancers"] = load_balancers response = self.ecs.update_service(**params) - return self.jsonize(response['service']) + return self.jsonize(response["service"]) def jsonize(self, service): # some fields are datetime which is not JSON serializable # make them strings - if 'createdAt' in service: - service['createdAt'] = str(service['createdAt']) - if 'deployments' in service: - for d in service['deployments']: - if 'createdAt' in d: - d['createdAt'] = str(d['createdAt']) - if 'updatedAt' in d: - d['updatedAt'] = str(d['updatedAt']) - if 'events' in service: - for e in service['events']: - if 'createdAt' in e: - e['createdAt'] = str(e['createdAt']) + if "createdAt" in service: + service["createdAt"] = str(service["createdAt"]) + if "deployments" in service: + for d in service["deployments"]: + if "createdAt" in d: + d["createdAt"] = str(d["createdAt"]) + if "updatedAt" in d: + d["updatedAt"] = str(d["updatedAt"]) + if "events" in service: + for e in service["events"]: + if "createdAt" in e: + e["createdAt"] = str(e["createdAt"]) return service def delete_service(self, service, cluster=None, force=False): return self.ecs.delete_service(cluster=cluster, service=service, force=force) def health_check_setable(self, params): - load_balancers = params.get('loadBalancers', []) + load_balancers = params.get("loadBalancers", []) return len(load_balancers) > 0 def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent', 'deleting']), - name=dict(required=True, type='str', aliases=['service']), - cluster=dict(required=False, type='str', default='default'), - task_definition=dict(required=False, type='str'), - 
load_balancers=dict(required=False, default=[], type='list', elements='dict'), - desired_count=dict(required=False, type='int'), - client_token=dict(required=False, default='', type='str', no_log=False), - role=dict(required=False, default='', type='str'), - delay=dict(required=False, type='int', default=10), - repeat=dict(required=False, type='int', default=10), - force_new_deployment=dict(required=False, default=False, type='bool'), - force_deletion=dict(required=False, default=False, type='bool'), - deployment_controller=dict(required=False, default={}, type='dict'), - deployment_configuration=dict(required=False, default={}, type='dict'), - wait=dict(required=False, default=False, type='bool'), + state=dict(required=True, choices=["present", "absent", "deleting"]), + name=dict(required=True, type="str", aliases=["service"]), + cluster=dict(required=False, type="str", default="default"), + task_definition=dict(required=False, type="str"), + load_balancers=dict(required=False, default=[], type="list", elements="dict"), + desired_count=dict(required=False, type="int"), + client_token=dict(required=False, default="", type="str", no_log=False), + role=dict(required=False, default="", type="str"), + delay=dict(required=False, type="int", default=10), + repeat=dict(required=False, type="int", default=10), + force_new_deployment=dict(required=False, default=False, type="bool"), + force_deletion=dict(required=False, default=False, type="bool"), + deployment_controller=dict(required=False, default={}, type="dict"), + deployment_configuration=dict(required=False, default={}, type="dict"), + wait=dict(required=False, default=False, type="bool"), placement_constraints=dict( required=False, default=[], - type='list', - elements='dict', - options=dict( - type=dict(type='str'), - expression=dict(required=False, type='str') - ) + type="list", + elements="dict", + options=dict(type=dict(type="str"), expression=dict(required=False, type="str")), ), - purge_placement_constraints=dict(required=False, default=False, type='bool'), + purge_placement_constraints=dict(required=False, default=False, type="bool"), placement_strategy=dict( required=False, default=[], - type='list', - elements='dict', + type="list", + elements="dict", + options=dict( + type=dict(type="str"), + field=dict(type="str"), + ), + ), + purge_placement_strategy=dict(required=False, default=False, type="bool"), + health_check_grace_period_seconds=dict(required=False, type="int"), + network_configuration=dict( + required=False, + type="dict", options=dict( - type=dict(type='str'), - field=dict(type='str'), - ) + subnets=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + assign_public_ip=dict(type="bool"), + ), ), - purge_placement_strategy=dict(required=False, default=False, type='bool'), - health_check_grace_period_seconds=dict(required=False, type='int'), - network_configuration=dict(required=False, type='dict', options=dict( - subnets=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - assign_public_ip=dict(type='bool') - )), - launch_type=dict(required=False, choices=['EC2', 'FARGATE']), - platform_version=dict(required=False, type='str'), - service_registries=dict(required=False, type='list', default=[], elements='dict'), - scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']), + launch_type=dict(required=False, choices=["EC2", "FARGATE"]), + platform_version=dict(required=False, type="str"), + service_registries=dict(required=False, 
type="list", default=[], elements="dict"), + scheduling_strategy=dict(required=False, choices=["DAEMON", "REPLICA"]), capacity_provider_strategy=dict( required=False, - type='list', + type="list", default=[], - elements='dict', + elements="dict", options=dict( - capacity_provider=dict(type='str'), - weight=dict(type='int'), - base=dict(type='int') - ) + capacity_provider=dict(type="str"), + weight=dict(type="int"), + base=dict(type="int"), + ), ), propagate_tags=dict(required=False, choices=["TASK_DEFINITION", "SERVICE"]), tags=dict(required=False, type="dict"), enable_execute_command=dict(required=False, type="bool"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - required_if=[('launch_type', 'FARGATE', ['network_configuration'])], - required_together=[['load_balancers', 'role']], - mutually_exclusive=[['launch_type', 'capacity_provider_strategy']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[("launch_type", "FARGATE", ["network_configuration"])], + required_together=[["load_balancers", "role"]], + mutually_exclusive=[["launch_type", "capacity_provider_strategy"]], + ) - if module.params['state'] == 'present': - if module.params['scheduling_strategy'] == 'REPLICA' and module.params['desired_count'] is None: - module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count') - if module.params['task_definition'] is None and not module.params['force_new_deployment']: - module.fail_json(msg='Either task_definition or force_new_deployment is required when status is present.') + if module.params["state"] == "present": + if module.params["scheduling_strategy"] == "REPLICA" and module.params["desired_count"] is None: + module.fail_json(msg="state is present, scheduling_strategy is REPLICA; missing desired_count") + if module.params["task_definition"] is None and not module.params["force_new_deployment"]: + module.fail_json(msg="Either task_definition or force_new_deployment is required when status is present.") - if len(module.params['capacity_provider_strategy']) > 6: - module.fail_json(msg='AWS allows a maximum of six capacity providers in the strategy.') + if len(module.params["capacity_provider_strategy"]) > 6: + module.fail_json(msg="AWS allows a maximum of six capacity providers in the strategy.") service_mgr = EcsServiceManager(module) - if module.params['network_configuration']: - network_configuration = service_mgr.format_network_configuration(module.params['network_configuration']) + if module.params["network_configuration"]: + network_configuration = service_mgr.format_network_configuration(module.params["network_configuration"]) else: network_configuration = None - deployment_controller = map_complex_type(module.params['deployment_controller'], - DEPLOYMENT_CONTROLLER_TYPE_MAP) + deployment_controller = map_complex_type(module.params["deployment_controller"], DEPLOYMENT_CONTROLLER_TYPE_MAP) deploymentController = snake_dict_to_camel_dict(deployment_controller) - deployment_configuration = map_complex_type(module.params['deployment_configuration'], - DEPLOYMENT_CONFIGURATION_TYPE_MAP) + deployment_configuration = map_complex_type( + module.params["deployment_configuration"], DEPLOYMENT_CONFIGURATION_TYPE_MAP + ) deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration) - serviceRegistries = list(map(snake_dict_to_camel_dict, module.params['service_registries'])) - capacityProviders = list(map(snake_dict_to_camel_dict, 
module.params['capacity_provider_strategy'])) + serviceRegistries = list(map(snake_dict_to_camel_dict, module.params["service_registries"])) + capacityProviders = list(map(snake_dict_to_camel_dict, module.params["capacity_provider_strategy"])) try: - existing = service_mgr.describe_service(module.params['cluster'], module.params['name']) + existing = service_mgr.describe_service(module.params["cluster"], module.params["name"]) except Exception as e: - module.fail_json_aws(e, - msg="Exception describing service '{0}' in cluster '{1}'" - .format(module.params['name'], module.params['cluster'])) + module.fail_json_aws( + e, + msg=f"Exception describing service '{module.params['name']}' in cluster '{module.params['cluster']}'", + ) results = dict(changed=False) - if module.params['state'] == 'present': - + if module.params["state"] == "present": matching = False update = False - if existing and 'status' in existing and existing['status'] == "ACTIVE": - if module.params['force_new_deployment']: + if existing and "status" in existing and existing["status"] == "ACTIVE": + if module.params["force_new_deployment"]: update = True elif service_mgr.is_matching_service(module.params, existing): matching = True - results['service'] = existing + results["service"] = existing else: update = True if not matching: if not module.check_mode: - - role = module.params['role'] - clientToken = module.params['client_token'] + role = module.params["role"] + clientToken = module.params["client_token"] loadBalancers = [] - for loadBalancer in module.params['load_balancers']: - if 'containerPort' in loadBalancer: - loadBalancer['containerPort'] = int(loadBalancer['containerPort']) + for loadBalancer in module.params["load_balancers"]: + if "containerPort" in loadBalancer: + loadBalancer["containerPort"] = int(loadBalancer["containerPort"]) loadBalancers.append(loadBalancer) for loadBalancer in loadBalancers: - if 'containerPort' in loadBalancer: - loadBalancer['containerPort'] = int(loadBalancer['containerPort']) + if "containerPort" in loadBalancer: + loadBalancer["containerPort"] = int(loadBalancer["containerPort"]) if update: # check various parameters and AWS SDK versions and give a helpful error if the SDK is not new enough for feature - if module.params['scheduling_strategy']: - if (existing['schedulingStrategy']) != module.params['scheduling_strategy']: - module.fail_json(msg="It is not possible to update the scheduling strategy of an existing service") - - if module.params['service_registries']: - if (existing['serviceRegistries'] or []) != serviceRegistries: - module.fail_json(msg="It is not possible to update the service registries of an existing service") - if module.params['capacity_provider_strategy']: - if 'launchType' in existing.keys(): - module.fail_json(msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy.") - if module.params['launch_type']: - if 'capacityProviderStrategy' in existing.keys(): - module.fail_json(msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type.") - if (existing['loadBalancers'] or []) != loadBalancers: + if module.params["scheduling_strategy"]: + if (existing["schedulingStrategy"]) != module.params["scheduling_strategy"]: + module.fail_json( + msg="It is not possible to update the scheduling strategy of an existing service" + ) + + if module.params["service_registries"]: + if (existing["serviceRegistries"] or []) != serviceRegistries: + module.fail_json( + msg="It is not possible to 
update the service registries of an existing service" + ) + if module.params["capacity_provider_strategy"]: + if "launchType" in existing.keys(): + module.fail_json( + msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy." + ) + if module.params["launch_type"]: + if "capacityProviderStrategy" in existing.keys(): + module.fail_json( + msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type." + ) + if (existing["loadBalancers"] or []) != loadBalancers: # fails if deployment type is not CODE_DEPLOY or ECS - if existing['deploymentController']['type'] not in ['CODE_DEPLOY', 'ECS']: - module.fail_json(msg="It is not possible to update the load balancers of an existing service") + if existing["deploymentController"]["type"] not in ["CODE_DEPLOY", "ECS"]: + module.fail_json( + msg="It is not possible to update the load balancers of an existing service" + ) - if existing.get('deploymentController', {}).get('type', None) == 'CODE_DEPLOY': - task_definition = '' + if existing.get("deploymentController", {}).get("type", None) == "CODE_DEPLOY": + task_definition = "" network_configuration = [] else: - task_definition = module.params['task_definition'] + task_definition = module.params["task_definition"] - if module.params['propagate_tags'] and module.params['propagate_tags'] != existing['propagateTags']: - module.fail_json(msg="It is not currently supported to enable propagation tags of an existing service") + if module.params["propagate_tags"] and module.params["propagate_tags"] != existing["propagateTags"]: + module.fail_json( + msg="It is not currently supported to enable propagation tags of an existing service" + ) - if module.params['tags'] and boto3_tag_list_to_ansible_dict(existing['tags']) != module.params['tags']: + if ( + module.params["tags"] + and boto3_tag_list_to_ansible_dict(existing["tags"]) != module.params["tags"] + ): module.fail_json(msg="It is not currently supported to change tags of an existing service") - updatedLoadBalancers = loadBalancers if existing['deploymentController']['type'] == 'ECS' else [] + updatedLoadBalancers = loadBalancers if existing["deploymentController"]["type"] == "ECS" else [] - if task_definition is None and module.params['force_new_deployment']: - task_definition = existing['taskDefinition'] + if task_definition is None and module.params["force_new_deployment"]: + task_definition = existing["taskDefinition"] try: # update required @@ -1178,76 +1200,73 @@ def main(): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't create service") - if response.get('tags', None): - response['tags'] = boto3_tag_list_to_ansible_dict(response['tags']) - results['service'] = response + if response.get("tags", None): + response["tags"] = boto3_tag_list_to_ansible_dict(response["tags"]) + results["service"] = response - results['changed'] = True + results["changed"] = True - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": if not existing: pass else: # it exists, so we should delete it and mark changed. 
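# A minimal, standalone sketch of the waiter pattern the deletion path below relies on.
# Not part of this diff: the cluster and service names are hypothetical, but the
# "services_inactive" waiter and the WaiterConfig keys are the same ones the module uses,
# with its default delay/repeat of 10.
import boto3

ecs = boto3.client("ecs")
waiter = ecs.get_waiter("services_inactive")
waiter.wait(
    cluster="example-cluster",  # hypothetical cluster name
    services=["example-service"],  # hypothetical service name
    WaiterConfig={"Delay": 10, "MaxAttempts": 10},
)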
# return info about the service deleted
-            del existing['deployments']
-            del existing['events']
-            results['ansible_facts'] = existing
-            if 'status' in existing and existing['status'] == "INACTIVE":
-                results['changed'] = False
+            del existing["deployments"]
+            del existing["events"]
+            results["ansible_facts"] = existing
+            if "status" in existing and existing["status"] == "INACTIVE":
+                results["changed"] = False
             else:
                 if not module.check_mode:
                     try:
                         service_mgr.delete_service(
-                            module.params['name'],
-                            module.params['cluster'],
-                            module.params['force_deletion'],
+                            module.params["name"],
+                            module.params["cluster"],
+                            module.params["force_deletion"],
                         )

                         # Wait for service to be INACTIVE prior to exiting
-                        if module.params['wait']:
-                            waiter = service_mgr.ecs.get_waiter('services_inactive')
+                        if module.params["wait"]:
+                            waiter = service_mgr.ecs.get_waiter("services_inactive")
                             try:
                                 waiter.wait(
-                                    services=[module.params['name']],
-                                    cluster=module.params['cluster'],
+                                    services=[module.params["name"]],
+                                    cluster=module.params["cluster"],
                                     WaiterConfig={
-                                        'Delay': module.params['delay'],
-                                        'MaxAttempts': module.params['repeat']
-                                    }
+                                        "Delay": module.params["delay"],
+                                        "MaxAttempts": module.params["repeat"],
+                                    },
                                 )
                             except botocore.exceptions.WaiterError as e:
-                                module.fail_json_aws(e, 'Timeout waiting for service removal')
+                                module.fail_json_aws(e, "Timeout waiting for service removal")
                     except botocore.exceptions.ClientError as e:
                         module.fail_json_aws(e, msg="Couldn't delete service")

-                results['changed'] = True
+                results["changed"] = True

-    elif module.params['state'] == 'deleting':
+    elif module.params["state"] == "deleting":
         if not existing:
-            module.fail_json(msg="Service '" + module.params['name'] + " not found.")
+            module.fail_json(msg="Service '" + module.params["name"] + "' not found.")
             return
         # it exists, so we should delete it and mark changed.
         # return info about the service deleted
-        delay = module.params['delay']
-        repeat = module.params['repeat']
+        delay = module.params["delay"]
+        repeat = module.params["repeat"]
         time.sleep(delay)
         for i in range(repeat):
-            existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
-            status = existing['status']
+            existing = service_mgr.describe_service(module.params["cluster"], module.params["name"])
+            status = existing["status"]
             if status == "INACTIVE":
-                results['changed'] = True
+                results["changed"] = True
                 break
             time.sleep(delay)
-            if i is repeat - 1:
+            if i == repeat - 1:
-                module.fail_json(
-                    msg="Service still not deleted after {0} tries of {1} seconds each."
- .format(repeat, delay) - ) + module.fail_json(msg=f"Service still not deleted after {repeat} tries of {delay} seconds each.") return module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ecs_service_info.py b/ansible_collections/community/aws/plugins/modules/ecs_service_info.py index f174a31cd..02a6abff2 100644 --- a/ansible_collections/community/aws/plugins/modules/ecs_service_info.py +++ b/ansible_collections/community/aws/plugins/modules/ecs_service_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_service_info version_added: 1.0.0 @@ -42,13 +40,12 @@ options: elements: str aliases: ['name'] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Basic listing example @@ -62,9 +59,9 @@ EXAMPLES = r''' - community.aws.ecs_service_info: cluster: test-cluster register: output -''' +""" -RETURN = r''' +RETURN = r""" services: description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below. returned: success @@ -132,16 +129,17 @@ services: returned: when events is true type: list elements: dict -''' # NOQA +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class EcsServiceManager: @@ -149,14 +147,14 @@ class EcsServiceManager: def __init__(self, module): self.module = module - self.ecs = module.client('ecs') + self.ecs = module.client("ecs") @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_services_with_backoff(self, **kwargs): - paginator = self.ecs.get_paginator('list_services') + paginator = self.ecs.get_paginator("list_services") try: return paginator.paginate(**kwargs).build_full_result() - except is_boto3_error_code('ClusterNotFoundException') as e: + except is_boto3_error_code("ClusterNotFoundException") as e: self.module.fail_json_aws(e, "Could not find cluster to list services") @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) @@ -166,43 +164,43 @@ class EcsServiceManager: def list_services(self, cluster): fn_args = dict() if cluster and cluster is not None: - fn_args['cluster'] = cluster + fn_args["cluster"] = cluster try: response = self.list_services_with_backoff(**fn_args) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't list ECS services") - relevant_response = dict(services=response['serviceArns']) + 
relevant_response = dict(services=response["serviceArns"]) return relevant_response def describe_services(self, cluster, services): fn_args = dict() if cluster and cluster is not None: - fn_args['cluster'] = cluster - fn_args['services'] = services + fn_args["cluster"] = cluster + fn_args["services"] = services try: response = self.describe_services_with_backoff(**fn_args) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't describe ECS services") - running_services = [self.extract_service_from(service) for service in response.get('services', [])] - services_not_running = response.get('failures', []) + running_services = [self.extract_service_from(service) for service in response.get("services", [])] + services_not_running = response.get("failures", []) return running_services, services_not_running def extract_service_from(self, service): # some fields are datetime which is not JSON serializable # make them strings - if 'deployments' in service: - for d in service['deployments']: - if 'createdAt' in d: - d['createdAt'] = str(d['createdAt']) - if 'updatedAt' in d: - d['updatedAt'] = str(d['updatedAt']) - if 'events' in service: - if not self.module.params['events']: - del service['events'] + if "deployments" in service: + for d in service["deployments"]: + if "createdAt" in d: + d["createdAt"] = str(d["createdAt"]) + if "updatedAt" in d: + d["updatedAt"] = str(d["updatedAt"]) + if "events" in service: + if not self.module.params["events"]: + del service["events"] else: - for e in service['events']: - if 'createdAt' in e: - e['createdAt'] = str(e['createdAt']) + for e in service["events"]: + if "createdAt" in e: + e["createdAt"] = str(e["createdAt"]) return service @@ -210,38 +208,37 @@ def chunks(l, n): """Yield successive n-sized chunks from l.""" """ https://stackoverflow.com/a/312464 """ for i in range(0, len(l), n): - yield l[i:i + n] + yield l[i:i + n] # fmt: skip def main(): - argument_spec = dict( - details=dict(type='bool', default=False), - events=dict(type='bool', default=True), + details=dict(type="bool", default=False), + events=dict(type="bool", default=True), cluster=dict(), - service=dict(type='list', elements='str', aliases=['name']) + service=dict(type="list", elements="str", aliases=["name"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - show_details = module.params.get('details') + show_details = module.params.get("details") task_mgr = EcsServiceManager(module) if show_details: - if module.params['service']: - services = module.params['service'] + if module.params["service"]: + services = module.params["service"] else: - services = task_mgr.list_services(module.params['cluster'])['services'] + services = task_mgr.list_services(module.params["cluster"])["services"] ecs_info = dict(services=[], services_not_running=[]) for chunk in chunks(services, 10): - running_services, services_not_running = task_mgr.describe_services(module.params['cluster'], chunk) - ecs_info['services'].extend(running_services) - ecs_info['services_not_running'].extend(services_not_running) + running_services, services_not_running = task_mgr.describe_services(module.params["cluster"], chunk) + ecs_info["services"].extend(running_services) + ecs_info["services_not_running"].extend(services_not_running) else: - ecs_info = task_mgr.list_services(module.params['cluster']) + ecs_info = task_mgr.list_services(module.params["cluster"]) module.exit_json(changed=False, **ecs_info) -if __name__ == 
'__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ecs_tag.py b/ansible_collections/community/aws/plugins/modules/ecs_tag.py index 8698a7bbd..dd09096ea 100644 --- a/ansible_collections/community/aws/plugins/modules/ecs_tag.py +++ b/ansible_collections/community/aws/plugins/modules/ecs_tag.py @@ -1,20 +1,17 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: (c) 2019, Michael Pechner <mikey@mikey.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_tag version_added: 1.0.0 short_description: create and remove tags on Amazon ECS resources -notes: - - none description: - - Creates and removes tags for Amazon ECS resources. - - Resources are referenced by their cluster name. + - Creates and removes tags for Amazon ECS resources. + - Resources are referenced by their cluster name. author: - Michael Pechner (@mpechner) options: @@ -53,13 +50,12 @@ options: type: bool default: false extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure tags are present on a resource community.aws.ecs_tag: cluster_name: mycluster @@ -90,12 +86,12 @@ EXAMPLES = r''' cluster_name: mycluster resource_type: cluster tags: - Name: foo + Name: foo state: absent purge_tags: true -''' +""" -RETURN = r''' +RETURN = r""" tags: description: A dict containing the tags on the resource returned: always @@ -108,47 +104,49 @@ removed_tags: description: A dict of tags that were removed from the resource returned: If tags were removed type: dict -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: - pass # Handled by AnsibleAWSModule -__metaclass__ = type + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_tags(ecs, module, resource): try: - return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)['tags']) + return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)["tags"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource)) + module.fail_json_aws(e, msg=f"Failed to fetch tags for resource {resource}") def get_arn(ecs, module, cluster_name, resource_type, resource): - try: - if resource_type == 'cluster': + if resource_type == "cluster": description = ecs.describe_clusters(clusters=[resource]) - resource_arn = description['clusters'][0]['clusterArn'] - elif resource_type == 'task': + resource_arn = 
description["clusters"][0]["clusterArn"] + elif resource_type == "task": description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource]) - resource_arn = description['tasks'][0]['taskArn'] - elif resource_type == 'service': + resource_arn = description["tasks"][0]["taskArn"] + elif resource_type == "service": description = ecs.describe_services(cluster=cluster_name, services=[resource]) - resource_arn = description['services'][0]['serviceArn'] - elif resource_type == 'task_definition': + resource_arn = description["services"][0]["serviceArn"] + elif resource_type == "task_definition": description = ecs.describe_task_definition(taskDefinition=resource) - resource_arn = description['taskDefinition']['taskDefinitionArn'] - elif resource_type == 'container': + resource_arn = description["taskDefinition"]["taskDefinitionArn"] + elif resource_type == "container": description = ecs.describe_container_instances(clusters=[resource]) - resource_arn = description['containerInstances'][0]['containerInstanceArn'] + resource_arn = description["containerInstances"][0]["containerInstanceArn"] except (IndexError, KeyError): - module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource)) + module.fail_json(msg=f"Failed to find {resource_type} {resource}") except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to find {0} {1}'.format(resource_type, resource)) + module.fail_json_aws(e, msg=f"Failed to find {resource_type} {resource}") return resource_arn @@ -157,28 +155,28 @@ def main(): argument_spec = dict( cluster_name=dict(required=True), resource=dict(required=False), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container']) + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), + resource_type=dict(default="cluster", choices=["cluster", "task", "service", "task_definition", "container"]), ) - required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])] + required_if = [("state", "present", ["tags"]), ("state", "absent", ["tags"])] module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) - resource_type = module.params['resource_type'] - cluster_name = module.params['cluster_name'] - if resource_type == 'cluster': + resource_type = module.params["resource_type"] + cluster_name = module.params["cluster_name"] + if resource_type == "cluster": resource = cluster_name else: - resource = module.params['resource'] - tags = module.params['tags'] - state = module.params['state'] - purge_tags = module.params['purge_tags'] + resource = module.params["resource"] + tags = module.params["tags"] + state = module.params["state"] + purge_tags = module.params["purge_tags"] - result = {'changed': False} + result = {"changed": False} - ecs = module.client('ecs') + ecs = module.client("ecs") resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource) @@ -187,7 +185,7 @@ def main(): add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) remove_tags = {} - if state == 'absent': + if state == "absent": for key in tags: if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): remove_tags[key] = current_tags[key] @@ -196,28 
+194,28 @@ def main(): remove_tags[key] = current_tags[key] if remove_tags: - result['changed'] = True - result['removed_tags'] = remove_tags + result["changed"] = True + result["removed_tags"] = remove_tags if not module.check_mode: try: ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys())) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource)) + module.fail_json_aws(e, msg=f"Failed to remove tags {remove_tags} from resource {resource}") - if state == 'present' and add_tags: - result['changed'] = True - result['added_tags'] = add_tags + if state == "present" and add_tags: + result["changed"] = True + result["added_tags"] = add_tags current_tags.update(add_tags) if not module.check_mode: try: - tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name='key', tag_value_key_name='value') + tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name="key", tag_value_key_name="value") ecs.tag_resource(resourceArn=resource_arn, tags=tags) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource)) + module.fail_json_aws(e, msg=f"Failed to set tags {add_tags} on resource {resource}") - result['tags'] = get_tags(ecs, module, resource_arn) + result["tags"] = get_tags(ecs, module, resource_arn) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ecs_task.py b/ansible_collections/community/aws/plugins/modules/ecs_task.py index 54948ce21..169ff4c7b 100644 --- a/ansible_collections/community/aws/plugins/modules/ecs_task.py +++ b/ansible_collections/community/aws/plugins/modules/ecs_task.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_task version_added: 1.0.0 @@ -99,13 +97,12 @@ options: default: false version_added: 4.1.0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Simple example of run task - name: Run task community.aws.ecs_task: @@ -120,65 +117,66 @@ EXAMPLES = r''' - name: Start a task community.aws.ecs_task: - operation: start - cluster: console-sample-app-static-cluster - task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" - tags: - resourceName: a_task_for_ansible_to_run - type: long_running_task - network: internal - version: 1.4 - container_instances: + operation: start + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" + tags: + resourceName: a_task_for_ansible_to_run + type: long_running_task + network: internal + version: 1.4 + container_instances: - arn:aws:ecs:us-west-2:123456789012:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8 - started_by: ansible_user - network_configuration: - subnets: + started_by: ansible_user + network_configuration: + subnets: - subnet-abcd1234 - security_groups: + security_groups: - 
sg-aaaa1111 - my_security_group register: task_output - name: RUN a task on Fargate community.aws.ecs_task: - operation: run - cluster: console-sample-app-static-cluster - task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" - started_by: ansible_user - launch_type: FARGATE - network_configuration: - subnets: + operation: run + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" + started_by: ansible_user + launch_type: FARGATE + network_configuration: + subnets: - subnet-abcd1234 - security_groups: + security_groups: - sg-aaaa1111 - my_security_group register: task_output - name: RUN a task on Fargate with public ip assigned community.aws.ecs_task: - operation: run - count: 2 - cluster: console-sample-app-static-cluster - task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" - started_by: ansible_user - launch_type: FARGATE - network_configuration: - assign_public_ip: true - subnets: + operation: run + count: 2 + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" + started_by: ansible_user + launch_type: FARGATE + network_configuration: + assign_public_ip: true + subnets: - subnet-abcd1234 register: task_output - name: Stop a task community.aws.ecs_task: - operation: stop - cluster: console-sample-app-static-cluster - task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" -''' -RETURN = r''' + operation: stop + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" +""" + +RETURN = r""" task: description: details about the task that was started returned: success @@ -242,45 +240,47 @@ task: description: The launch type on which to run your task. 
returned: always type: str -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class EcsExecManager: """Handles ECS Tasks""" def __init__(self, module): self.module = module - self.ecs = module.client('ecs') - self.ec2 = module.client('ec2') + self.ecs = module.client("ecs") + self.ec2 = module.client("ec2") def format_network_configuration(self, network_config): result = dict() - if 'subnets' in network_config: - result['subnets'] = network_config['subnets'] + if "subnets" in network_config: + result["subnets"] = network_config["subnets"] else: self.module.fail_json(msg="Network configuration must include subnets") - if 'security_groups' in network_config: - groups = network_config['security_groups'] - if any(not sg.startswith('sg-') for sg in groups): + if "security_groups" in network_config: + groups = network_config["security_groups"] + if any(not sg.startswith("sg-") for sg in groups): try: - vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId'] + vpc_id = self.ec2.describe_subnets(SubnetIds=[result["subnets"][0]])["Subnets"][0]["VpcId"] groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't look up security groups") - result['securityGroups'] = groups - if 'assign_public_ip' in network_config: - if network_config['assign_public_ip'] is True: - result['assignPublicIp'] = "ENABLED" + result["securityGroups"] = groups + if "assign_public_ip" in network_config: + if network_config["assign_public_ip"] is True: + result["assignPublicIp"] = "ENABLED" else: - result['assignPublicIp'] = "DISABLED" + result["assignPublicIp"] = "DISABLED" return dict(awsvpcConfiguration=result) @@ -288,10 +288,10 @@ class EcsExecManager: response = self.ecs.list_tasks( cluster=cluster_name, family=service_name, - desiredStatus=status + desiredStatus=status, ) - if len(response['taskArns']) > 0: - for c in response['taskArns']: + if len(response["taskArns"]) > 0: + for c in response["taskArns"]: if c.endswith(service_name): return c return None @@ -299,14 +299,17 @@ class EcsExecManager: def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type, tags): if overrides is None: overrides = dict() - params = dict(cluster=cluster, taskDefinition=task_definition, - overrides=overrides, count=count, startedBy=startedBy) - if self.module.params['network_configuration']: - params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration']) + params = dict( + cluster=cluster, taskDefinition=task_definition, overrides=overrides, count=count, startedBy=startedBy + ) + if self.module.params["network_configuration"]: + params["networkConfiguration"] = self.format_network_configuration( + self.module.params["network_configuration"] + ) if launch_type: - params['launchType'] = launch_type + 
params["launchType"] = launch_type if tags: - params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + params["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value") # TODO: need to check if long arn format enabled. try: @@ -314,168 +317,164 @@ class EcsExecManager: except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't run task") # include tasks and failures - return response['tasks'] + return response["tasks"] def start_task(self, cluster, task_definition, overrides, container_instances, startedBy, tags): args = dict() if cluster: - args['cluster'] = cluster + args["cluster"] = cluster if task_definition: - args['taskDefinition'] = task_definition + args["taskDefinition"] = task_definition if overrides: - args['overrides'] = overrides + args["overrides"] = overrides if container_instances: - args['containerInstances'] = container_instances + args["containerInstances"] = container_instances if startedBy: - args['startedBy'] = startedBy - if self.module.params['network_configuration']: - args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration']) + args["startedBy"] = startedBy + if self.module.params["network_configuration"]: + args["networkConfiguration"] = self.format_network_configuration( + self.module.params["network_configuration"] + ) if tags: - args['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + args["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value") try: response = self.ecs.start_task(**args) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't start task") # include tasks and failures - return response['tasks'] + return response["tasks"] def stop_task(self, cluster, task): response = self.ecs.stop_task(cluster=cluster, task=task) - return response['task'] + return response["task"] def ecs_task_long_format_enabled(self): - account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True) - return account_support['settings'][0]['value'] == 'enabled' + account_support = self.ecs.list_account_settings(name="taskLongArnFormat", effectiveSettings=True) + return account_support["settings"][0]["value"] == "enabled" def main(): argument_spec = dict( - operation=dict(required=True, choices=['run', 'start', 'stop']), - cluster=dict(required=False, type='str', default='default'), # R S P - task_definition=dict(required=False, type='str'), # R* S* - overrides=dict(required=False, type='dict'), # R S - count=dict(required=False, type='int'), # R - task=dict(required=False, type='str'), # P* - container_instances=dict(required=False, type='list', elements='str'), # S* - started_by=dict(required=False, type='str'), # R S - network_configuration=dict(required=False, type='dict'), - launch_type=dict(required=False, choices=['EC2', 'FARGATE']), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - wait=dict(required=False, default=False, type='bool'), + operation=dict(required=True, choices=["run", "start", "stop"]), + cluster=dict(required=False, type="str", default="default"), # R S P + task_definition=dict(required=False, type="str"), # R* S* + overrides=dict(required=False, type="dict"), # R S + count=dict(required=False, type="int"), # R + task=dict(required=False, type="str"), # P* + container_instances=dict(required=False, type="list", elements="str"), # S* + started_by=dict(required=False, 
type="str"), # R S + network_configuration=dict(required=False, type="dict"), + launch_type=dict(required=False, choices=["EC2", "FARGATE"]), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + wait=dict(required=False, default=False, type="bool"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, - required_if=[ - ('launch_type', 'FARGATE', ['network_configuration']), - ('operation', 'run', ['task_definition']), - ('operation', 'start', [ - 'task_definition', - 'container_instances' - ]), - ('operation', 'stop', ['task_definition', 'task']), - ]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ("launch_type", "FARGATE", ["network_configuration"]), + ("operation", "run", ["task_definition"]), + ("operation", "start", ["task_definition", "container_instances"]), + ("operation", "stop", ["task_definition", "task"]), + ], + ) # Validate Inputs - if module.params['operation'] == 'run': - task_to_list = module.params['task_definition'] + if module.params["operation"] == "run": + task_to_list = module.params["task_definition"] status_type = "RUNNING" - if module.params['operation'] == 'start': - task_to_list = module.params['task'] + if module.params["operation"] == "start": + task_to_list = module.params["task"] status_type = "RUNNING" - if module.params['operation'] == 'stop': - task_to_list = module.params['task_definition'] + if module.params["operation"] == "stop": + task_to_list = module.params["task_definition"] status_type = "STOPPED" service_mgr = EcsExecManager(module) - if module.params['tags']: + if module.params["tags"]: if not service_mgr.ecs_task_long_format_enabled(): module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags") - existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type) + existing = service_mgr.list_tasks(module.params["cluster"], task_to_list, status_type) results = dict(changed=False) - if module.params['operation'] == 'run': + if module.params["operation"] == "run": if existing: # TBD - validate the rest of the details - results['task'] = existing + results["task"] = existing else: if not module.check_mode: - # run_task returns a list of tasks created tasks = service_mgr.run_task( - module.params['cluster'], - module.params['task_definition'], - module.params['overrides'], - module.params['count'], - module.params['started_by'], - module.params['launch_type'], - module.params['tags'], + module.params["cluster"], + module.params["task_definition"], + module.params["overrides"], + module.params["count"], + module.params["started_by"], + module.params["launch_type"], + module.params["tags"], ) # Wait for task(s) to be running prior to exiting - if module.params['wait']: - - waiter = service_mgr.ecs.get_waiter('tasks_running') + if module.params["wait"]: + waiter = service_mgr.ecs.get_waiter("tasks_running") try: waiter.wait( - tasks=[task['taskArn'] for task in tasks], - cluster=module.params['cluster'], + tasks=[task["taskArn"] for task in tasks], + cluster=module.params["cluster"], ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, 'Timeout waiting for tasks to run') + module.fail_json_aws(e, "Timeout waiting for tasks to run") - results['task'] = tasks + results["task"] = tasks - results['changed'] = True + results["changed"] = True - elif module.params['operation'] == 'start': + elif module.params["operation"] == "start": if existing: # TBD - validate the rest of the 
details - results['task'] = existing + results["task"] = existing else: if not module.check_mode: - results['task'] = service_mgr.start_task( - module.params['cluster'], - module.params['task_definition'], - module.params['overrides'], - module.params['container_instances'], - module.params['started_by'], - module.params['tags'], + results["task"] = service_mgr.start_task( + module.params["cluster"], + module.params["task_definition"], + module.params["overrides"], + module.params["container_instances"], + module.params["started_by"], + module.params["tags"], ) - results['changed'] = True + results["changed"] = True - elif module.params['operation'] == 'stop': + elif module.params["operation"] == "stop": if existing: - results['task'] = existing + results["task"] = existing else: if not module.check_mode: # it exists, so we should delete it and mark changed. # return info about the cluster deleted - results['task'] = service_mgr.stop_task( - module.params['cluster'], - module.params['task'] - ) + results["task"] = service_mgr.stop_task(module.params["cluster"], module.params["task"]) # Wait for task to be stopped prior to exiting - if module.params['wait']: - - waiter = service_mgr.ecs.get_waiter('tasks_stopped') + if module.params["wait"]: + waiter = service_mgr.ecs.get_waiter("tasks_stopped") try: waiter.wait( - tasks=[module.params['task']], - cluster=module.params['cluster'], + tasks=[module.params["task"]], + cluster=module.params["cluster"], ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, 'Timeout waiting for task to stop') + module.fail_json_aws(e, "Timeout waiting for task to stop") - results['changed'] = True + results["changed"] = True module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py index a8b5e97d8..25a786e4f 100644 --- a/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py +++ b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_taskdefinition version_added: 1.0.0 @@ -629,50 +627,72 @@ options: expression: description: A cluster query language expression to apply to the constraint. 
type: str + runtime_platform: + version_added: 6.4.0 + description: + - runtime platform configuration for the task + required: false + type: dict + default: { + "operatingSystemFamily": "LINUX", + "cpuArchitecture": "X86_64" + } + suboptions: + cpuArchitecture: + description: The CPU Architecture type to be used by the task + type: str + required: false + choices: ['X86_64', 'ARM64'] + operatingSystemFamily: + description: OS type to be used by the task + type: str + required: false + choices: ['LINUX', 'WINDOWS_SERVER_2019_FULL', 'WINDOWS_SERVER_2019_CORE', 'WINDOWS_SERVER_2022_FULL', 'WINDOWS_SERVER_2022_CORE'] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create task definition community.aws.ecs_taskdefinition: containers: - - name: simple-app - cpu: 10 - essential: true - image: "httpd:2.4" - memory: 300 - mountPoints: - - containerPath: /usr/local/apache2/htdocs - sourceVolume: my-vol - portMappings: - - containerPort: 80 - hostPort: 80 - logConfiguration: - logDriver: awslogs - options: - awslogs-group: /ecs/test-cluster-taskdef - awslogs-region: us-west-2 - awslogs-stream-prefix: ecs - - name: busybox - command: - - > - /bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1><h2>Congratulations! - </h2><p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom; - cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done" - cpu: 10 - entryPoint: - - sh - - "-c" - essential: false - image: busybox - memory: 200 - volumesFrom: - - sourceContainer: simple-app + - name: simple-app + cpu: 10 + essential: true + image: "httpd:2.4" + memory: 300 + mountPoints: + - containerPath: /usr/local/apache2/htdocs + sourceVolume: my-vol + portMappings: + - containerPort: 80 + hostPort: 80 + logConfiguration: + logDriver: awslogs + options: + awslogs-group: /ecs/test-cluster-taskdef + awslogs-region: us-west-2 + awslogs-stream-prefix: ecs + - name: busybox + command: + - > + /bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1> + <h2>Congratulations!</h2> + <p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom; + cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done" + cpu: 10 + entryPoint: + - sh + - "-c" + essential: false + image: busybox + memory: 200 + volumesFrom: + - sourceContainer: simple-app volumes: - - name: my-vol + - name: my-vol family: test-cluster-taskdef state: present register: task_output @@ -681,26 +701,26 @@ EXAMPLES = r''' community.aws.ecs_taskdefinition: family: nginx containers: - - name: nginx - essential: true - image: "nginx" - portMappings: - - containerPort: 8080 - hostPort: 8080 - cpu: 512 - memory: 1024 + - name: nginx + essential: true + image: "nginx" + portMappings: + - containerPort: 8080 + hostPort: 8080 + cpu: 512 + memory: 1024 state: present - name: Create task definition community.aws.ecs_taskdefinition: family: nginx containers: - - name: nginx - essential: true - image: "nginx" - portMappings: - - containerPort: 8080 - hostPort: 8080 + - name: nginx + essential: true + image: "nginx" + portMappings: + - containerPort: 8080 + hostPort: 8080 launch_type: 
FARGATE cpu: 512 memory: 1024 @@ -711,36 +731,36 @@ EXAMPLES = r''' community.aws.ecs_taskdefinition: family: nginx containers: - - name: nginx - essential: true - image: "nginx" - portMappings: - - containerPort: 8080 - hostPort: 8080 - cpu: 512 - memory: 1024 - dependsOn: - - containerName: "simple-app" - condition: "start" + - name: nginx + essential: true + image: "nginx" + portMappings: + - containerPort: 8080 + hostPort: 8080 + cpu: 512 + memory: 1024 + dependsOn: + - containerName: "simple-app" + condition: "start" # Create Task Definition with Environment Variables and Secrets - name: Create task definition community.aws.ecs_taskdefinition: family: nginx containers: - - name: nginx - essential: true - image: "nginx" - environment: - - name: "PORT" - value: "8080" - secrets: - # For variables stored in Secrets Manager - - name: "NGINX_HOST" - valueFrom: "arn:aws:secretsmanager:us-west-2:123456789012:secret:nginx/NGINX_HOST" - # For variables stored in Parameter Store - - name: "API_KEY" - valueFrom: "arn:aws:ssm:us-west-2:123456789012:parameter/nginx/API_KEY" + - name: nginx + essential: true + image: "nginx" + environment: + - name: "PORT" + value: "8080" + secrets: + # For variables stored in Secrets Manager + - name: "NGINX_HOST" + valueFrom: "arn:aws:secretsmanager:us-west-2:123456789012:secret:nginx/NGINX_HOST" + # For variables stored in Parameter Store + - name: "API_KEY" + valueFrom: "arn:aws:ssm:us-west-2:123456789012:parameter/nginx/API_KEY" launch_type: FARGATE cpu: 512 memory: 1GB @@ -752,39 +772,40 @@ EXAMPLES = r''' community.aws.ecs_taskdefinition: family: nginx containers: - - name: nginx - essential: true - image: "nginx" - portMappings: - - containerPort: 8080 - hostPort: 8080 - cpu: 512 - memory: 1024 - healthCheck: - command: + - name: nginx + essential: true + image: "nginx" + portMappings: + - containerPort: 8080 + hostPort: 8080 + cpu: 512 + memory: 1024 + healthCheck: + command: - CMD-SHELL - /app/healthcheck.py - interval: 60 - retries: 3 - startPeriod: 15 - timeout: 15 + interval: 60 + retries: 3 + startPeriod: 15 + timeout: 15 state: present -''' -RETURN = r''' +""" + +RETURN = r""" taskdefinition: description: a reflection of the input parameters type: dict returned: always -''' +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class EcsTaskManager: @@ -793,49 +814,65 @@ class EcsTaskManager: def __init__(self, module): self.module = module - self.ecs = module.client('ecs', AWSRetry.jittered_backoff()) + self.ecs = module.client("ecs", AWSRetry.jittered_backoff()) def describe_task(self, task_name): try: response = self.ecs.describe_task_definition(aws_retry=True, taskDefinition=task_name) - return response['taskDefinition'] + return response["taskDefinition"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: return None - def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, - volumes, launch_type, cpu, memory, placement_constraints): + def register_task( + self, + family, + task_role_arn, + execution_role_arn, + network_mode, + container_definitions, + volumes, + launch_type, + 
cpu, + memory, + placement_constraints, + runtime_platform, + ): validated_containers = [] # Ensures the number parameters are int as required by the AWS SDK for container in container_definitions: - for param in ('memory', 'cpu', 'memoryReservation', 'startTimeout', 'stopTimeout'): + for param in ("memory", "cpu", "memoryReservation", "startTimeout", "stopTimeout"): if param in container: container[param] = int(container[param]) - if 'portMappings' in container: - for port_mapping in container['portMappings']: - for port in ('hostPort', 'containerPort'): + if "portMappings" in container: + for port_mapping in container["portMappings"]: + for port in ("hostPort", "containerPort"): if port in port_mapping: port_mapping[port] = int(port_mapping[port]) - if network_mode == 'awsvpc' and 'hostPort' in port_mapping: - if port_mapping['hostPort'] != port_mapping.get('containerPort'): - self.module.fail_json(msg="In awsvpc network mode, host port must be set to the same as " - "container port or not be set") - - if 'linuxParameters' in container: - for linux_param in container.get('linuxParameters'): - if linux_param == 'tmpfs': - for tmpfs_param in container['linuxParameters']['tmpfs']: - if 'size' in tmpfs_param: - tmpfs_param['size'] = int(tmpfs_param['size']) - - for param in ('maxSwap', 'swappiness', 'sharedMemorySize'): + if network_mode == "awsvpc" and "hostPort" in port_mapping: + if port_mapping["hostPort"] != port_mapping.get("containerPort"): + self.module.fail_json( + msg=( + "In awsvpc network mode, host port must be set to the same as " + "container port or not be set" + ) + ) + + if "linuxParameters" in container: + for linux_param in container.get("linuxParameters"): + if linux_param == "tmpfs": + for tmpfs_param in container["linuxParameters"]["tmpfs"]: + if "size" in tmpfs_param: + tmpfs_param["size"] = int(tmpfs_param["size"]) + + for param in ("maxSwap", "swappiness", "sharedMemorySize"): if param in linux_param: - container['linuxParameters'][param] = int(container['linuxParameters'][param]) + container["linuxParameters"][param] = int(container["linuxParameters"][param]) - if 'ulimits' in container: - for limits_mapping in container['ulimits']: - for limit in ('softLimit', 'hardLimit'): + if "ulimits" in container: + for limits_mapping in container["ulimits"]: + for limit in ("softLimit", "hardLimit"): if limit in limits_mapping: limits_mapping[limit] = int(limits_mapping[limit]) @@ -845,47 +882,44 @@ class EcsTaskManager: family=family, taskRoleArn=task_role_arn, containerDefinitions=container_definitions, - volumes=volumes + volumes=volumes, ) - if network_mode != 'default': - params['networkMode'] = network_mode + if network_mode != "default": + params["networkMode"] = network_mode if cpu: - params['cpu'] = cpu + params["cpu"] = cpu if memory: - params['memory'] = memory + params["memory"] = memory if launch_type: - params['requiresCompatibilities'] = [launch_type] + params["requiresCompatibilities"] = [launch_type] if execution_role_arn: - params['executionRoleArn'] = execution_role_arn + params["executionRoleArn"] = execution_role_arn if placement_constraints: - params['placementConstraints'] = placement_constraints + params["placementConstraints"] = placement_constraints + if runtime_platform: + params["runtimePlatform"] = runtime_platform try: response = self.ecs.register_task_definition(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Failed to register task") - return 
response['taskDefinition'] + return response["taskDefinition"] def describe_task_definitions(self, family): - data = { - "taskDefinitionArns": [], - "nextToken": None - } + data = {"taskDefinitionArns": [], "nextToken": None} def fetch(): # Boto3 is weird about params passed, so only pass nextToken if we have a value - params = { - 'familyPrefix': family - } + params = {"familyPrefix": family} - if data['nextToken']: - params['nextToken'] = data['nextToken'] + if data["nextToken"]: + params["nextToken"] = data["nextToken"] result = self.ecs.list_task_definitions(**params) - data['taskDefinitionArns'] += result['taskDefinitionArns'] - data['nextToken'] = result.get('nextToken', None) - return data['nextToken'] is not None + data["taskDefinitionArns"] += result["taskDefinitionArns"] + data["nextToken"] = result.get("nextToken", None) + return data["nextToken"] is not None # Fetch all the arns, possibly across multiple pages while fetch(): @@ -894,118 +928,154 @@ class EcsTaskManager: # Return the full descriptions of the task definitions, sorted ascending by revision return list( sorted( - [self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']], - key=lambda td: td['revision'] + [ + self.ecs.describe_task_definition(taskDefinition=arn)["taskDefinition"] + for arn in data["taskDefinitionArns"] + ], + key=lambda td: td["revision"], ) ) def deregister_task(self, taskArn): response = self.ecs.deregister_task_definition(taskDefinition=taskArn) - return response['taskDefinition'] + return response["taskDefinition"] def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), - arn=dict(required=False, type='str'), - family=dict(required=False, type='str'), - revision=dict(required=False, type='int'), - force_create=dict(required=False, default=False, type='bool'), - containers=dict(required=True, type='list', elements='dict'), - network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'), - task_role_arn=dict(required=False, default='', type='str'), - execution_role_arn=dict(required=False, default='', type='str'), - volumes=dict(required=False, type='list', elements='dict'), - launch_type=dict(required=False, choices=['EC2', 'FARGATE']), + state=dict(required=True, choices=["present", "absent"]), + arn=dict(required=False, type="str"), + family=dict(required=False, type="str"), + revision=dict(required=False, type="int"), + force_create=dict(required=False, default=False, type="bool"), + containers=dict(required=True, type="list", elements="dict"), + network_mode=dict( + required=False, default="bridge", choices=["default", "bridge", "host", "none", "awsvpc"], type="str" + ), + task_role_arn=dict(required=False, default="", type="str"), + execution_role_arn=dict(required=False, default="", type="str"), + volumes=dict(required=False, type="list", elements="dict"), + launch_type=dict(required=False, choices=["EC2", "FARGATE"]), cpu=dict(), - memory=dict(required=False, type='str'), - placement_constraints=dict(required=False, type='list', elements='dict', - options=dict(type=dict(type='str'), expression=dict(type='str'))), + memory=dict(required=False, type="str"), + placement_constraints=dict( + required=False, + type="list", + elements="dict", + options=dict(type=dict(type="str"), expression=dict(type="str")), + ), + runtime_platform=dict( + required=False, + default={"operatingSystemFamily": "LINUX", "cpuArchitecture": "X86_64"}, + type="dict", + 
options=dict(
+                cpuArchitecture=dict(required=False, choices=["X86_64", "ARM64"]),
+                operatingSystemFamily=dict(
+                    required=False,
+                    choices=[
+                        "LINUX",
+                        "WINDOWS_SERVER_2019_FULL",
+                        "WINDOWS_SERVER_2019_CORE",
+                        "WINDOWS_SERVER_2022_FULL",
+                        "WINDOWS_SERVER_2022_CORE",
+                    ],
+                ),
+            ),
+        ),
     )

-    module = AnsibleAWSModule(argument_spec=argument_spec,
-                              supports_check_mode=True,
-                              required_if=[('launch_type', 'FARGATE', ['cpu', 'memory'])]
-                              )
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=[("launch_type", "FARGATE", ["cpu", "memory"])],
+    )

     task_to_describe = None
     task_mgr = EcsTaskManager(module)
     results = dict(changed=False)

-    if module.params['state'] == 'present':
-        if 'containers' not in module.params or not module.params['containers']:
+    if module.params["state"] == "present":
+        if "containers" not in module.params or not module.params["containers"]:
             module.fail_json(msg="To use task definitions, a list of containers must be specified")

-        if 'family' not in module.params or not module.params['family']:
+        if "family" not in module.params or not module.params["family"]:
             module.fail_json(msg="To use task definitions, a family must be specified")

-        network_mode = module.params['network_mode']
-        launch_type = module.params['launch_type']
-        placement_constraints = module.params['placement_constraints']
-        if launch_type == 'FARGATE':
-            if network_mode != 'awsvpc':
+        network_mode = module.params["network_mode"]
+        launch_type = module.params["launch_type"]
+        placement_constraints = module.params["placement_constraints"]
+        if launch_type == "FARGATE":
+            if network_mode != "awsvpc":
                 module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc")
             if placement_constraints:
                 module.fail_json(msg="Task placement constraints are not supported for tasks run on Fargate")

-        for container in module.params['containers']:
-            if container.get('links') and network_mode == 'awsvpc':
-                module.fail_json(msg='links parameter is not supported if network mode is awsvpc.')
+        for container in module.params["containers"]:
+            if container.get("links") and network_mode == "awsvpc":
+                module.fail_json(msg="links parameter is not supported if network mode is awsvpc.")

-            for environment in container.get('environment', []):
-                environment['value'] = environment['value']
+            for environment in container.get("environment", []):
+                environment["value"] = environment["value"]

-            for environment_file in container.get('environmentFiles', []):
-                if environment_file['type'] != 's3':
-                    module.fail_json(msg='The only supported value for environmentFiles is s3.')
+            for environment_file in container.get("environmentFiles", []):
+                if environment_file["type"] != "s3":
+                    module.fail_json(msg="The only supported value for environmentFiles is s3.")

-            for linux_param in container.get('linuxParameters', {}):
-                if linux_param == 'maxSwap' and launch_type == 'FARGATE':
-                    module.fail_json(msg='devices parameter is not supported with the FARGATE launch type.')
+            for linux_param in container.get("linuxParameters", {}):
+                if linux_param == "devices" and launch_type == "FARGATE":
+                    module.fail_json(msg="devices parameter is not supported with the FARGATE launch type.")

-                if linux_param == 'maxSwap' and launch_type == 'FARGATE':
-                    module.fail_json(msg='maxSwap parameter is not supported with the FARGATE launch type.')
-                elif linux_param == 'maxSwap' and int(container['linuxParameters']['maxSwap']) < 0:
-                    module.fail_json(msg='Accepted values for maxSwap are 0 or any positive integer.')
+                if linux_param == "maxSwap" and launch_type == "FARGATE":
+                    module.fail_json(msg="maxSwap parameter is not supported with the FARGATE launch type.")
+                elif linux_param == "maxSwap" and int(container["linuxParameters"]["maxSwap"]) < 0:
+                    module.fail_json(msg="Accepted values for maxSwap are 0 or any positive integer.")
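# A condensed, standalone sketch of the linuxParameters rules enforced around this point.
# Not part of this diff; validate_linux_parameters is a hypothetical helper name, and the
# messages mirror the module's own fail_json() texts.
def validate_linux_parameters(container, launch_type):
    """Return the first applicable error message for a container definition, else None."""
    params = container.get("linuxParameters", {})
    for key in ("devices", "maxSwap", "sharedMemorySize", "tmpfs"):
        if key in params and launch_type == "FARGATE":
            return f"{key} parameter is not supported with the FARGATE launch type."
    if "maxSwap" in params and int(params["maxSwap"]) < 0:
        return "Accepted values for maxSwap are 0 or any positive integer."
    if "swappiness" in params and not 0 <= int(params["swappiness"]) <= 100:
        return "Accepted values for swappiness are whole numbers between 0 and 100."
    return None

# e.g. validate_linux_parameters({"linuxParameters": {"maxSwap": -1}}, "EC2")
# returns "Accepted values for maxSwap are 0 or any positive integer."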
+ if linux_param == "maxSwap" and launch_type == "FARGATE": + module.fail_json(msg="maxSwap parameter is not supported with the FARGATE launch type.") + elif linux_param == "maxSwap" and int(container["linuxParameters"]["maxSwap"]) < 0: + module.fail_json(msg="Accepted values for maxSwap are 0 or any positive integer.") - if ( - linux_param == 'swappiness' and - (int(container['linuxParameters']['swappiness']) < 0 or int(container['linuxParameters']['swappiness']) > 100) + if linux_param == "swappiness" and ( + int(container["linuxParameters"]["swappiness"]) < 0 + or int(container["linuxParameters"]["swappiness"]) > 100 ): - module.fail_json(msg='Accepted values for swappiness are whole numbers between 0 and 100.') + module.fail_json(msg="Accepted values for swappiness are whole numbers between 0 and 100.") - if linux_param == 'sharedMemorySize' and launch_type == 'FARGATE': - module.fail_json(msg='sharedMemorySize parameter is not supported with the FARGATE launch type.') + if linux_param == "sharedMemorySize" and launch_type == "FARGATE": + module.fail_json(msg="sharedMemorySize parameter is not supported with the FARGATE launch type.") - if linux_param == 'tmpfs' and launch_type == 'FARGATE': - module.fail_json(msg='tmpfs parameter is not supported with the FARGATE launch type.') + if linux_param == "tmpfs" and launch_type == "FARGATE": + module.fail_json(msg="tmpfs parameter is not supported with the FARGATE launch type.") - if container.get('hostname') and network_mode == 'awsvpc': - module.fail_json(msg='hostname parameter is not supported when the awsvpc network mode is used.') + if container.get("hostname") and network_mode == "awsvpc": + module.fail_json(msg="hostname parameter is not supported when the awsvpc network mode is used.") - if container.get('extraHosts') and network_mode == 'awsvpc': - module.fail_json(msg='extraHosts parameter is not supported when the awsvpc network mode is used.') + if container.get("extraHosts") and network_mode == "awsvpc": + module.fail_json(msg="extraHosts parameter is not supported when the awsvpc network mode is used.") - family = module.params['family'] - existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family']) + family = module.params["family"] + existing_definitions_in_family = task_mgr.describe_task_definitions(module.params["family"]) - if 'revision' in module.params and module.params['revision']: + if "revision" in module.params and module.params["revision"]: # The definition specifies revision. We must guarantee that an active revision of that number will result from this. - revision = int(module.params['revision']) + revision = int(module.params["revision"]) # A revision has been explicitly specified. 
Attempt to locate a matching revision - tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision] + tasks_defs_for_revision = [td for td in existing_definitions_in_family if td["revision"] == revision] existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None - if existing and existing['status'] != "ACTIVE": + if existing and existing["status"] != "ACTIVE": # We cannot reactivate an inactive revision - module.fail_json(msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision)) + module.fail_json( + msg=f"A task in family '{family}' already exists for revision {int(revision)}, but it is inactive" + ) elif not existing: if not existing_definitions_in_family and revision != 1: - module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision) - elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision: - module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" % - (revision, existing_definitions_in_family[-1]['revision'] + 1)) + module.fail_json( + msg=f"You have specified a revision of {int(revision)} but a created revision would be 1" + ) + elif existing_definitions_in_family and existing_definitions_in_family[-1]["revision"] + 1 != revision: + module.fail_json( + msg=( + f"You have specified a revision of {int(revision)} but a created revision would be" + f" {int(existing_definitions_in_family[-1]['revision'] + 1)}" + ) + ) else: existing = None @@ -1025,9 +1095,9 @@ def main(): if list_val not in right_list: # if list_val is the port mapping, the key 'protocol' may be absent (but defaults to 'tcp') # fill in that default if absent and see if it is in right_list then - if isinstance(list_val, dict) and not list_val.get('protocol'): + if isinstance(list_val, dict) and not list_val.get("protocol"): modified_list_val = dict(list_val) - modified_list_val.update(protocol='tcp') + modified_list_val.update(protocol="tcp") if modified_list_val in right_list: continue else: @@ -1037,24 +1107,32 @@ def main(): for k, v in right.items(): if v and k not in left: # 'essential' defaults to True when not specified - if k == 'essential' and v is True: + if k == "essential" and v is True: pass else: return False return True - def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, existing_task_definition): - if td['status'] != "ACTIVE": + def _task_definition_matches( + requested_volumes, + requested_containers, + requested_task_role_arn, + requested_launch_type, + existing_task_definition, + ): + if td["status"] != "ACTIVE": return None - if requested_task_role_arn != td.get('taskRoleArn', ""): + if requested_task_role_arn != td.get("taskRoleArn", ""): return None - if requested_launch_type is not None and requested_launch_type not in td.get('requiresCompatibilities', []): + if requested_launch_type is not None and requested_launch_type not in td.get( + "requiresCompatibilities", [] + ): return None - existing_volumes = td.get('volumes', []) or [] + existing_volumes = td.get("volumes", []) or [] if len(requested_volumes) != len(existing_volumes): # Nope. @@ -1072,7 +1150,7 @@ def main(): if not found: return None - existing_containers = td.get('containerDefinitions', []) or [] + existing_containers = td.get("containerDefinitions", []) or [] if len(requested_containers) != len(existing_containers): # Nope. 
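Note on the revision handling above: the bookkeeping reduces to one rule — a requested revision must either match an existing ACTIVE revision (an idempotent no-op) or be exactly the next number ECS would assign on registration. A standalone sketch of that guard, separate from the patch itself, using hypothetical data shaped like DescribeTaskDefinition results:

    # Illustrative sketch only, not part of the patch. `existing` mimics the
    # module's revision-sorted list of task definitions for one family.
    def validate_requested_revision(existing, requested):
        match = next((td for td in existing if td["revision"] == requested), None)
        if match:
            if match["status"] != "ACTIVE":
                raise ValueError(f"revision {requested} exists but is inactive and cannot be reactivated")
            return match  # idempotent: reuse the active revision
        next_revision = existing[-1]["revision"] + 1 if existing else 1
        if requested != next_revision:
            raise ValueError(f"requested revision {requested}, but registering would create {next_revision}")
        return None  # caller should register a new revision

    existing = [{"revision": 1, "status": "ACTIVE"}, {"revision": 2, "status": "ACTIVE"}]
    assert validate_requested_revision(existing, 2)["revision"] == 2
    assert validate_requested_revision(existing, 3) is None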
@@ -1093,42 +1171,51 @@ def main(): # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested for td in existing_definitions_in_family: - requested_volumes = module.params['volumes'] or [] - requested_containers = module.params['containers'] or [] - requested_task_role_arn = module.params['task_role_arn'] - requested_launch_type = module.params['launch_type'] - existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, td) + requested_volumes = module.params["volumes"] or [] + requested_containers = module.params["containers"] or [] + requested_task_role_arn = module.params["task_role_arn"] + requested_launch_type = module.params["launch_type"] + existing = _task_definition_matches( + requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, td + ) if existing: break - if existing and not module.params.get('force_create'): + if existing and not module.params.get("force_create"): # Awesome. Have an existing one. Nothing to do. - results['taskdefinition'] = existing + results["taskdefinition"] = existing else: if not module.check_mode: # Doesn't exist. create it. - volumes = module.params.get('volumes', []) or [] - results['taskdefinition'] = task_mgr.register_task(module.params['family'], - module.params['task_role_arn'], - module.params['execution_role_arn'], - module.params['network_mode'], - module.params['containers'], - volumes, - module.params['launch_type'], - module.params['cpu'], - module.params['memory'], - module.params['placement_constraints'],) - results['changed'] = True - - elif module.params['state'] == 'absent': + volumes = module.params.get("volumes", []) or [] + results["taskdefinition"] = task_mgr.register_task( + module.params["family"], + module.params["task_role_arn"], + module.params["execution_role_arn"], + module.params["network_mode"], + module.params["containers"], + volumes, + module.params["launch_type"], + module.params["cpu"], + module.params["memory"], + module.params["placement_constraints"], + module.params["runtime_platform"], + ) + results["changed"] = True + + elif module.params["state"] == "absent": # When de-registering a task definition, we can specify the ARN OR the family and revision. - if module.params['state'] == 'absent': - if 'arn' in module.params and module.params['arn'] is not None: - task_to_describe = module.params['arn'] - elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \ - module.params['revision'] is not None: - task_to_describe = module.params['family'] + ":" + str(module.params['revision']) + if module.params["state"] == "absent": + if "arn" in module.params and module.params["arn"] is not None: + task_to_describe = module.params["arn"] + elif ( + "family" in module.params + and module.params["family"] is not None + and "revision" in module.params + and module.params["revision"] is not None + ): + task_to_describe = module.params["family"] + ":" + str(module.params["revision"]) else: module.fail_json(msg="To use task definitions, an arn or family and revision must be specified") @@ -1138,16 +1225,16 @@ def main(): pass else: # It exists, so we should delete it and mark changed. 
Return info about the task definition deleted - results['taskdefinition'] = existing - if 'status' in existing and existing['status'] == "INACTIVE": - results['changed'] = False + results["taskdefinition"] = existing + if "status" in existing and existing["status"] == "INACTIVE": + results["changed"] = False else: if not module.check_mode: task_mgr.deregister_task(task_to_describe) - results['changed'] = True + results["changed"] = True module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py index 6fbc41731..5e235096d 100644 --- a/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py +++ b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ecs_taskdefinition_info version_added: 1.0.0 @@ -27,20 +25,19 @@ options: required: true type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - community.aws.ecs_taskdefinition_info: task_definition: test-td -''' +""" -RETURN = ''' +RETURN = r""" container_definitions: description: Returns a list of complex objects representing the containers returned: success @@ -348,33 +345,34 @@ placement_constraints: description: A cluster query language expression to apply to the constraint. 
returned: when present type: str -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def main(): argument_spec = dict( - task_definition=dict(required=True, type='str') + task_definition=dict(required=True, type="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - ecs = module.client('ecs') + ecs = module.client("ecs") try: - ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition'] + ecs_td = ecs.describe_task_definition(taskDefinition=module.params["task_definition"])["taskDefinition"] except botocore.exceptions.ClientError: ecs_td = {} module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/efs.py b/ansible_collections/community/aws/plugins/modules/efs.py index de1d563fb..6b9390f2b 100644 --- a/ansible_collections/community/aws/plugins/modules/efs.py +++ b/ansible_collections/community/aws/plugins/modules/efs.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: efs version_added: 1.0.0 @@ -102,34 +100,33 @@ options: version_added: 2.1.0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: EFS provisioning community.aws.efs: state: present name: myTestEFS tags: - Name: myTestNameTag - purpose: file-storage + Name: myTestNameTag + purpose: file-storage targets: - - subnet_id: subnet-748c5d03 - security_groups: [ "sg-1a2b3c4d" ] + - subnet_id: subnet-748c5d03 + security_groups: ["sg-1a2b3c4d"] - name: Modifying EFS data community.aws.efs: state: present name: myTestEFS tags: - name: myAnotherTestTag + name: myAnotherTestTag targets: - - subnet_id: subnet-7654fdca - security_groups: [ "sg-4c5d6f7a" ] + - subnet_id: subnet-7654fdca + security_groups: ["sg-4c5d6f7a"] - name: Set a lifecycle policy community.aws.efs: @@ -137,8 +134,8 @@ EXAMPLES = r''' name: myTestEFS transition_to_ia: 7 targets: - - subnet_id: subnet-7654fdca - security_groups: [ "sg-4c5d6f7a" ] + - subnet_id: subnet-7654fdca + security_groups: ["sg-4c5d6f7a"] - name: Remove a lifecycle policy community.aws.efs: @@ -146,16 +143,16 @@ EXAMPLES = r''' name: myTestEFS transition_to_ia: None targets: - - subnet_id: subnet-7654fdca - security_groups: [ "sg-4c5d6f7a" ] + - subnet_id: subnet-7654fdca + security_groups: ["sg-4c5d6f7a"] - name: Deleting EFS community.aws.efs: state: absent name: myTestEFS -''' +""" -RETURN = r''' +RETURN = r""" creation_time: description: timestamp of creation date returned: always @@ -244,8 +241,7 @@ tags: "name": "my-efs", "key": "Value" } - -''' +""" from time import sleep from time import time as timestamp @@ 
-257,11 +253,12 @@ except ImportError as e: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def _index_by_key(key, items): @@ -269,35 +266,34 @@ def _index_by_key(key, items): class EFSConnection(object): - DEFAULT_WAIT_TIMEOUT_SECONDS = 0 - STATE_CREATING = 'creating' - STATE_AVAILABLE = 'available' - STATE_DELETING = 'deleting' - STATE_DELETED = 'deleted' + STATE_CREATING = "creating" + STATE_AVAILABLE = "available" + STATE_DELETING = "deleting" + STATE_DELETED = "deleted" def __init__(self, module): - self.connection = module.client('efs') + self.connection = module.client("efs") region = module.region self.module = module self.region = region - self.wait = module.params.get('wait') - self.wait_timeout = module.params.get('wait_timeout') + self.wait = module.params.get("wait") + self.wait_timeout = module.params.get("wait_timeout") def get_file_systems(self, **kwargs): """ - Returns generator of file systems including all attributes of FS + Returns generator of file systems including all attributes of FS """ items = iterate_all( - 'FileSystems', + "FileSystems", self.connection.describe_file_systems, - **kwargs + **kwargs, ) for item in items: - item['Name'] = item['CreationToken'] - item['CreationTime'] = str(item['CreationTime']) + item["Name"] = item["CreationToken"] + item["CreationTime"] = str(item["CreationTime"]) """ In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it AWS updated it and now there is no need to add a suffix. 
MountPoint is left for back-compatibility purpose @@ -305,90 +301,92 @@ class EFSConnection(object): AWS documentation is available here: https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html """ - item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) - item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) - if 'Timestamp' in item['SizeInBytes']: - item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp']) - if item['LifeCycleState'] == self.STATE_AVAILABLE: - item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId']) - item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId'])) + item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" + item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" + if "Timestamp" in item["SizeInBytes"]: + item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"]) + if item["LifeCycleState"] == self.STATE_AVAILABLE: + item["Tags"] = self.get_tags(FileSystemId=item["FileSystemId"]) + item["MountTargets"] = list(self.get_mount_targets(FileSystemId=item["FileSystemId"])) else: - item['Tags'] = {} - item['MountTargets'] = [] + item["Tags"] = {} + item["MountTargets"] = [] yield item def get_tags(self, **kwargs): """ - Returns tag list for selected instance of EFS + Returns tag list for selected instance of EFS """ - tags = self.connection.describe_tags(**kwargs)['Tags'] + tags = self.connection.describe_tags(**kwargs)["Tags"] return tags def get_mount_targets(self, **kwargs): """ - Returns mount targets for selected instance of EFS + Returns mount targets for selected instance of EFS """ targets = iterate_all( - 'MountTargets', + "MountTargets", self.connection.describe_mount_targets, - **kwargs + **kwargs, ) for target in targets: - if target['LifeCycleState'] == self.STATE_AVAILABLE: - target['SecurityGroups'] = list(self.get_security_groups( - MountTargetId=target['MountTargetId'] - )) + if target["LifeCycleState"] == self.STATE_AVAILABLE: + target["SecurityGroups"] = list(self.get_security_groups(MountTargetId=target["MountTargetId"])) else: - target['SecurityGroups'] = [] + target["SecurityGroups"] = [] yield target def get_security_groups(self, **kwargs): """ - Returns security groups for selected instance of EFS + Returns security groups for selected instance of EFS """ return iterate_all( - 'SecurityGroups', + "SecurityGroups", self.connection.describe_mount_target_security_groups, - **kwargs + **kwargs, ) def get_file_system_id(self, name): """ - Returns ID of instance by instance name + Returns ID of instance by instance name """ - info = first_or_default(iterate_all( - 'FileSystems', - self.connection.describe_file_systems, - CreationToken=name - )) - return info and info['FileSystemId'] or None + info = first_or_default( + iterate_all( + "FileSystems", + self.connection.describe_file_systems, + CreationToken=name, + ) + ) + return info and info["FileSystemId"] or None def get_file_system_state(self, name, file_system_id=None): """ - Returns state of filesystem by EFS id/name + Returns state of filesystem by EFS id/name """ - info = first_or_default(iterate_all( - 'FileSystems', - self.connection.describe_file_systems, - CreationToken=name, - FileSystemId=file_system_id - )) - return info and info['LifeCycleState'] or self.STATE_DELETED + info = first_or_default( + iterate_all( + "FileSystems", + self.connection.describe_file_systems, 
+ CreationToken=name, + FileSystemId=file_system_id, + ) + ) + return info and info["LifeCycleState"] or self.STATE_DELETED def get_mount_targets_in_state(self, file_system_id, states=None): """ - Returns states of mount targets of selected EFS with selected state(s) (optional) + Returns states of mount targets of selected EFS with selected state(s) (optional) """ targets = iterate_all( - 'MountTargets', + "MountTargets", self.connection.describe_mount_targets, - FileSystemId=file_system_id + FileSystemId=file_system_id, ) if states: if not isinstance(states, list): states = [states] - targets = filter(lambda target: target['LifeCycleState'] in states, targets) + targets = filter(lambda target: target["LifeCycleState"] in states, targets) return list(targets) @@ -396,47 +394,53 @@ class EFSConnection(object): """ Returns throughput mode for selected EFS instance """ - info = first_or_default(iterate_all( - 'FileSystems', - self.connection.describe_file_systems, - **kwargs - )) + info = first_or_default( + iterate_all( + "FileSystems", + self.connection.describe_file_systems, + **kwargs, + ) + ) - return info and info['ThroughputMode'] or None + return info and info["ThroughputMode"] or None def get_provisioned_throughput_in_mibps(self, **kwargs): """ Returns throughput mode for selected EFS instance """ - info = first_or_default(iterate_all( - 'FileSystems', - self.connection.describe_file_systems, - **kwargs - )) - return info.get('ProvisionedThroughputInMibps', None) + info = first_or_default( + iterate_all( + "FileSystems", + self.connection.describe_file_systems, + **kwargs, + ) + ) + return info.get("ProvisionedThroughputInMibps", None) - def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps): + def create_file_system( + self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps + ): """ - Creates new filesystem with selected name + Creates new filesystem with selected name """ changed = False state = self.get_file_system_state(name) params = {} - params['CreationToken'] = name - params['PerformanceMode'] = performance_mode + params["CreationToken"] = name + params["PerformanceMode"] = performance_mode if encrypt: - params['Encrypted'] = encrypt + params["Encrypted"] = encrypt if kms_key_id is not None: - params['KmsKeyId'] = kms_key_id + params["KmsKeyId"] = kms_key_id if throughput_mode: - params['ThroughputMode'] = throughput_mode + params["ThroughputMode"] = throughput_mode if provisioned_throughput_in_mibps: - params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps + params["ProvisionedThroughputInMibps"] = provisioned_throughput_in_mibps if state in [self.STATE_DELETING, self.STATE_DELETED]: wait_for( lambda: self.get_file_system_state(name), - self.STATE_DELETED + self.STATE_DELETED, ) try: self.connection.create_file_system(**params) @@ -450,7 +454,7 @@ class EFSConnection(object): wait_for( lambda: self.get_file_system_state(name), self.STATE_AVAILABLE, - self.wait_timeout + self.wait_timeout, ) return changed @@ -467,14 +471,14 @@ class EFSConnection(object): current_throughput = self.get_provisioned_throughput_in_mibps(FileSystemId=fs_id) params = dict() if throughput_mode and throughput_mode != current_mode: - params['ThroughputMode'] = throughput_mode + params["ThroughputMode"] = throughput_mode if provisioned_throughput_in_mibps and provisioned_throughput_in_mibps != current_throughput: - params['ProvisionedThroughputInMibps'] = 
provisioned_throughput_in_mibps + params["ProvisionedThroughputInMibps"] = provisioned_throughput_in_mibps if len(params) > 0: wait_for( lambda: self.get_file_system_state(name), self.STATE_AVAILABLE, - self.wait_timeout + self.wait_timeout, ) try: self.connection.update_file_system(FileSystemId=fs_id, **params) @@ -492,11 +496,11 @@ class EFSConnection(object): if state in [self.STATE_AVAILABLE, self.STATE_CREATING]: fs_id = self.get_file_system_id(name) current_policies = self.connection.describe_lifecycle_configuration(FileSystemId=fs_id) - if transition_to_ia == 'None': + if transition_to_ia == "None": LifecyclePolicies = [] else: - LifecyclePolicies = [{'TransitionToIA': 'AFTER_' + transition_to_ia + '_DAYS'}] - if current_policies.get('LifecyclePolicies') != LifecyclePolicies: + LifecyclePolicies = [{"TransitionToIA": "AFTER_" + transition_to_ia + "_DAYS"}] + if current_policies.get("LifecyclePolicies") != LifecyclePolicies: response = self.connection.put_lifecycle_configuration( FileSystemId=fs_id, LifecyclePolicies=LifecyclePolicies, @@ -506,20 +510,19 @@ class EFSConnection(object): def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, provisioned_throughput_in_mibps): """ - Change attributes (mount targets and tags) of filesystem by name + Change attributes (mount targets and tags) of filesystem by name """ result = False fs_id = self.get_file_system_id(name) if tags is not None: - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags) + tags_need_modify, tags_to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags + ) if tags_to_delete: try: - self.connection.delete_tags( - FileSystemId=fs_id, - TagKeys=tags_to_delete - ) + self.connection.delete_tags(FileSystemId=fs_id, TagKeys=tags_to_delete) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Unable to delete tags.") @@ -528,8 +531,7 @@ class EFSConnection(object): if tags_need_modify: try: self.connection.create_tags( - FileSystemId=fs_id, - Tags=ansible_dict_to_boto3_tag_list(tags_need_modify) + FileSystemId=fs_id, Tags=ansible_dict_to_boto3_tag_list(tags_need_modify) ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Unable to create tags.") @@ -540,54 +542,56 @@ class EFSConnection(object): incomplete_states = [self.STATE_CREATING, self.STATE_DELETING] wait_for( lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), - 0 + 0, ) - current_targets = _index_by_key('SubnetId', self.get_mount_targets(FileSystemId=fs_id)) - targets = _index_by_key('SubnetId', targets) + current_targets = _index_by_key("SubnetId", self.get_mount_targets(FileSystemId=fs_id)) + targets = _index_by_key("SubnetId", targets) - targets_to_create, intersection, targets_to_delete = dict_diff(current_targets, - targets, True) + targets_to_create, intersection, targets_to_delete = dict_diff(current_targets, targets, True) # To modify mount target it should be deleted and created again - changed = [sid for sid in intersection if not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'], - current_targets[sid], targets[sid])] + changed = [ + sid + for sid in intersection + if not targets_equal( + ["SubnetId", "IpAddress", "NetworkInterfaceId"], current_targets[sid], targets[sid] + ) + ] targets_to_delete = list(targets_to_delete) + changed 
targets_to_create = list(targets_to_create) + changed
 
             if targets_to_delete:
                 for sid in targets_to_delete:
-                    self.connection.delete_mount_target(
-                        MountTargetId=current_targets[sid]['MountTargetId']
-                    )
+                    self.connection.delete_mount_target(MountTargetId=current_targets[sid]["MountTargetId"])
                 wait_for(
                     lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
-                    0
+                    0,
                 )
                 result = True
 
             if targets_to_create:
                 for sid in targets_to_create:
-                    self.connection.create_mount_target(
-                        FileSystemId=fs_id,
-                        **targets[sid]
-                    )
+                    self.connection.create_mount_target(FileSystemId=fs_id, **targets[sid])
                 wait_for(
                     lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
                     0,
-                    self.wait_timeout
+                    self.wait_timeout,
                 )
                 result = True
 
             # If no security groups were passed into the module, then do not change it.
-            security_groups_to_update = [sid for sid in intersection if
-                                         'SecurityGroups' in targets[sid] and
-                                         current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups']]
+            security_groups_to_update = [
+                sid
+                for sid in intersection
+                if "SecurityGroups" in targets[sid]
+                and current_targets[sid]["SecurityGroups"] != targets[sid]["SecurityGroups"]
+            ]
 
             if security_groups_to_update:
                 for sid in security_groups_to_update:
                     self.connection.modify_mount_target_security_groups(
-                        MountTargetId=current_targets[sid]['MountTargetId'],
-                        SecurityGroups=targets[sid].get('SecurityGroups', None)
+                        MountTargetId=current_targets[sid]["MountTargetId"],
+                        SecurityGroups=targets[sid].get("SecurityGroups", None),
                     )
                     result = True
@@ -595,14 +599,14 @@ class EFSConnection(object):
 
     def delete_file_system(self, name, file_system_id=None):
         """
-        Removes EFS instance by id/name
+        Removes EFS instance by id/name
         """
         result = False
         state = self.get_file_system_state(name, file_system_id)
         if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
             wait_for(
                 lambda: self.get_file_system_state(name),
-                self.STATE_AVAILABLE
+                self.STATE_AVAILABLE,
             )
             if not file_system_id:
                 file_system_id = self.get_file_system_id(name)
@@ -614,27 +618,27 @@ class EFSConnection(object):
             wait_for(
                 lambda: self.get_file_system_state(name),
                 self.STATE_DELETED,
-                self.wait_timeout
+                self.wait_timeout,
             )
 
         return result
 
     def delete_mount_targets(self, file_system_id):
         """
-        Removes mount targets by EFS id
+        Removes mount targets by EFS id
         """
         wait_for(
             lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
-            0
+            0,
         )
         targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
         for target in targets:
-            self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
+            self.connection.delete_mount_target(MountTargetId=target["MountTargetId"])
         wait_for(
             lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
-            0
+            0,
         )
 
         return len(targets) > 0
@@ -642,7 +646,7 @@ class EFSConnection(object):
 
 def iterate_all(attr, map_method, **kwargs):
     """
-     Method creates iterator from result set
+    Method creates iterator from result set
     """
     args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
     wait = 1
@@ -651,11 +655,11 @@ def iterate_all(attr, map_method, **kwargs):
             data = map_method(**args)
             for elm in data[attr]:
                 yield elm
-            if 'NextMarker' in data:
-                args['Marker'] = data['Nextmarker']
+            if "NextMarker" in data:
+                args["Marker"] = data["NextMarker"]
                 continue
             break
-        except is_boto3_error_code('ThrottlingException'):
+        except is_boto3_error_code("ThrottlingException"):
             if wait < 600:
                 sleep(wait)
                 wait = wait * 2
@@ -666,7 +670,7 @@ def
iterate_all(attr, map_method, **kwargs): def targets_equal(keys, a, b): """ - Method compare two mount targets by specified attributes + Method compare two mount targets by specified attributes """ for key in keys: if key in b and a[key] != b[key]: @@ -677,7 +681,7 @@ def targets_equal(keys, a, b): def dict_diff(dict1, dict2, by_key=False): """ - Helper method to calculate difference of two dictionaries + Helper method to calculate difference of two dictionaries """ keys1 = set(dict1.keys() if by_key else dict1.items()) keys2 = set(dict2.keys() if by_key else dict2.items()) @@ -689,7 +693,7 @@ def dict_diff(dict1, dict2, by_key=False): def first_or_default(items, default=None): """ - Helper method to fetch first element of list (if exists) + Helper method to fetch first element of list (if exists) """ for item in items: return item @@ -698,13 +702,13 @@ def first_or_default(items, default=None): def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS): """ - Helper method to wait for desired value returned by callback method + Helper method to wait for desired value returned by callback method """ wait_start = timestamp() while True: if callback() != value: if timeout != 0 and (timestamp() - wait_start > timeout): - raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)') + raise RuntimeError("Wait timeout exceeded (" + str(timeout) + " sec)") else: sleep(5) continue @@ -713,67 +717,82 @@ def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS def main(): """ - Module action handler + Module action handler """ argument_spec = dict( encrypt=dict(required=False, type="bool", default=False), - state=dict(required=False, type='str', choices=["present", "absent"], default="present"), - kms_key_id=dict(required=False, type='str', default=None), - purge_tags=dict(default=True, type='bool'), - id=dict(required=False, type='str', default=None), - name=dict(required=False, type='str', default=None), - tags=dict(required=False, type="dict", aliases=['resource_tags']), - targets=dict(required=False, type="list", default=[], elements='dict'), - performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"), - transition_to_ia=dict(required=False, type='str', choices=["None", "7", "14", "30", "60", "90"], default=None), - throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None), - provisioned_throughput_in_mibps=dict(required=False, type='float'), + state=dict(required=False, type="str", choices=["present", "absent"], default="present"), + kms_key_id=dict(required=False, type="str", default=None), + purge_tags=dict(default=True, type="bool"), + id=dict(required=False, type="str", default=None), + name=dict(required=False, type="str", default=None), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + targets=dict(required=False, type="list", default=[], elements="dict"), + performance_mode=dict( + required=False, type="str", choices=["general_purpose", "max_io"], default="general_purpose" + ), + transition_to_ia=dict(required=False, type="str", choices=["None", "7", "14", "30", "60", "90"], default=None), + throughput_mode=dict(required=False, type="str", choices=["bursting", "provisioned"], default=None), + provisioned_throughput_in_mibps=dict(required=False, type="float"), wait=dict(required=False, type="bool", default=False), - wait_timeout=dict(required=False, type="int", default=0) + wait_timeout=dict(required=False, 
type="int", default=0), ) module = AnsibleAWSModule(argument_spec=argument_spec) connection = EFSConnection(module) - name = module.params.get('name') - fs_id = module.params.get('id') - tags = module.params.get('tags') + name = module.params.get("name") + fs_id = module.params.get("id") + tags = module.params.get("tags") target_translations = { - 'ip_address': 'IpAddress', - 'security_groups': 'SecurityGroups', - 'subnet_id': 'SubnetId' + "ip_address": "IpAddress", + "security_groups": "SecurityGroups", + "subnet_id": "SubnetId", } - targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')] + targets = [ + dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get("targets") + ] performance_mode_translations = { - 'general_purpose': 'generalPurpose', - 'max_io': 'maxIO' + "general_purpose": "generalPurpose", + "max_io": "maxIO", } - encrypt = module.params.get('encrypt') - kms_key_id = module.params.get('kms_key_id') - performance_mode = performance_mode_translations[module.params.get('performance_mode')] - purge_tags = module.params.get('purge_tags') - transition_to_ia = module.params.get('transition_to_ia') - throughput_mode = module.params.get('throughput_mode') - provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps') - state = str(module.params.get('state')).lower() + encrypt = module.params.get("encrypt") + kms_key_id = module.params.get("kms_key_id") + performance_mode = performance_mode_translations[module.params.get("performance_mode")] + purge_tags = module.params.get("purge_tags") + transition_to_ia = module.params.get("transition_to_ia") + throughput_mode = module.params.get("throughput_mode") + provisioned_throughput_in_mibps = module.params.get("provisioned_throughput_in_mibps") + state = str(module.params.get("state")).lower() changed = False - if state == 'present': + if state == "present": if not name: - module.fail_json(msg='Name parameter is required for create') + module.fail_json(msg="Name parameter is required for create") - changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps) + changed = connection.create_file_system( + name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps + ) changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed - changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets, - throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed + changed = ( + connection.converge_file_system( + name=name, + tags=tags, + purge_tags=purge_tags, + targets=targets, + throughput_mode=throughput_mode, + provisioned_throughput_in_mibps=provisioned_throughput_in_mibps, + ) + or changed + ) if transition_to_ia: changed |= connection.update_lifecycle_policy(name, transition_to_ia) result = first_or_default(connection.get_file_systems(CreationToken=name)) - elif state == 'absent': + elif state == "absent": if not name and not fs_id: - module.fail_json(msg='Either name or id parameter is required for delete') + module.fail_json(msg="Either name or id parameter is required for delete") changed = connection.delete_file_system(name, fs_id) result = None @@ -782,5 +801,5 @@ def main(): module.exit_json(changed=changed, efs=result) -if __name__ == '__main__': +if __name__ == 
"__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/efs_info.py b/ansible_collections/community/aws/plugins/modules/efs_info.py index 5ef436f3c..3a170a391 100644 --- a/ansible_collections/community/aws/plugins/modules/efs_info.py +++ b/ansible_collections/community/aws/plugins/modules/efs_info.py @@ -1,21 +1,19 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: efs_info version_added: 1.0.0 short_description: Get information about Amazon EFS file systems description: - - This module can be used to search Amazon EFS file systems. - Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)! +- This module can be used to search Amazon EFS file systems. + Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)! author: - - "Ryan Sydnor (@ryansydnor)" +- "Ryan Sydnor (@ryansydnor)" options: name: description: @@ -39,13 +37,12 @@ options: elements: str default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Find all existing efs community.aws.efs_info: register: result @@ -58,17 +55,17 @@ EXAMPLES = r''' - name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a' community.aws.efs_info: tags: - Name: myTestNameTag + Name: myTestNameTag targets: - - subnet-1a2b3c4d - - sg-4d3c2b1a + - subnet-1a2b3c4d + - sg-4d3c2b1a register: result - ansible.builtin.debug: msg: "{{ result['efs'] }}" -''' +""" -RETURN = r''' +RETURN = r""" creation_time: description: timestamp of creation date returned: always @@ -167,8 +164,7 @@ tags: "name": "my-efs", "key": "Value" } - -''' +""" from collections import defaultdict @@ -180,90 +176,94 @@ except ImportError: from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class EFSConnection(object): - STATE_CREATING = 'creating' - STATE_AVAILABLE = 'available' - STATE_DELETING = 'deleting' - STATE_DELETED = 'deleted' + STATE_CREATING = "creating" + STATE_AVAILABLE = "available" + STATE_DELETING = "deleting" + STATE_DELETED = "deleted" def __init__(self, module): try: - self.connection = module.client('efs') + self.connection = module.client("efs") self.module = module except Exception as e: - module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e)) + module.fail_json(msg=f"Failed to connect to AWS: {to_native(e)}") self.region = module.region - @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException']) + 
@AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"]) def list_file_systems(self, **kwargs): """ Returns generator of file systems including all attributes of FS """ - paginator = self.connection.get_paginator('describe_file_systems') - return paginator.paginate(**kwargs).build_full_result()['FileSystems'] + paginator = self.connection.get_paginator("describe_file_systems") + return paginator.paginate(**kwargs).build_full_result()["FileSystems"] - @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException']) + @AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"]) def get_tags(self, file_system_id): """ Returns tag list for selected instance of EFS """ - paginator = self.connection.get_paginator('describe_tags') - return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags']) + paginator = self.connection.get_paginator("describe_tags") + return boto3_tag_list_to_ansible_dict( + paginator.paginate(FileSystemId=file_system_id).build_full_result()["Tags"] + ) - @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException']) + @AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"]) def get_mount_targets(self, file_system_id): """ Returns mount targets for selected instance of EFS """ - paginator = self.connection.get_paginator('describe_mount_targets') - return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets'] + paginator = self.connection.get_paginator("describe_mount_targets") + return paginator.paginate(FileSystemId=file_system_id).build_full_result()["MountTargets"] - @AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException']) + @AWSRetry.jittered_backoff(catch_extra_error_codes=["ThrottlingException"]) def get_security_groups(self, mount_target_id): """ Returns security groups for selected instance of EFS """ - return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups'] + return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)["SecurityGroups"] def get_mount_targets_data(self, file_systems): for item in file_systems: - if item['life_cycle_state'] == self.STATE_AVAILABLE: + if item["life_cycle_state"] == self.STATE_AVAILABLE: try: - mount_targets = self.get_mount_targets(item['file_system_id']) + mount_targets = self.get_mount_targets(item["file_system_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't get EFS targets") for mt in mount_targets: - item['mount_targets'].append(camel_dict_to_snake_dict(mt)) + item["mount_targets"].append(camel_dict_to_snake_dict(mt)) return file_systems def get_security_groups_data(self, file_systems): for item in file_systems: - if item['life_cycle_state'] == self.STATE_AVAILABLE: - for target in item['mount_targets']: - if target['life_cycle_state'] == self.STATE_AVAILABLE: + if item["life_cycle_state"] == self.STATE_AVAILABLE: + for target in item["mount_targets"]: + if target["life_cycle_state"] == self.STATE_AVAILABLE: try: - target['security_groups'] = self.get_security_groups(target['mount_target_id']) + target["security_groups"] = self.get_security_groups(target["mount_target_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't get EFS security groups") else: - target['security_groups'] = [] + 
target["security_groups"] = [] else: - item['tags'] = {} - item['mount_targets'] = [] + item["tags"] = {} + item["mount_targets"] = [] return file_systems def get_file_systems(self, file_system_id=None, creation_token=None): kwargs = dict() if file_system_id: - kwargs['FileSystemId'] = file_system_id + kwargs["FileSystemId"] = file_system_id if creation_token: - kwargs['CreationToken'] = creation_token + kwargs["CreationToken"] = creation_token try: file_systems = self.list_file_systems(**kwargs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -271,7 +271,7 @@ class EFSConnection(object): results = list() for item in file_systems: - item['CreationTime'] = str(item['CreationTime']) + item["CreationTime"] = str(item["CreationTime"]) """ In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose @@ -279,18 +279,18 @@ class EFSConnection(object): AWS documentation is available here: U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html) """ - item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) - item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) + item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" + item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" - if 'Timestamp' in item['SizeInBytes']: - item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp']) + if "Timestamp" in item["SizeInBytes"]: + item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"]) result = camel_dict_to_snake_dict(item) - result['tags'] = {} - result['mount_targets'] = [] + result["tags"] = {} + result["mount_targets"] = [] # Set tags *after* doing camel to snake - if result['life_cycle_state'] == self.STATE_AVAILABLE: + if result["life_cycle_state"] == self.STATE_AVAILABLE: try: - result['tags'] = self.get_tags(result['file_system_id']) + result["tags"] = self.get_tags(result["file_system_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't get EFS tags") results.append(result) @@ -302,13 +302,14 @@ def prefix_to_attr(attr_id): Helper method to convert ID prefix to mount target attribute """ attr_by_prefix = { - 'fsmt-': 'mount_target_id', - 'subnet-': 'subnet_id', - 'eni-': 'network_interface_id', - 'sg-': 'security_groups' + "fsmt-": "mount_target_id", + "subnet-": "subnet_id", + "eni-": "network_interface_id", + "sg-": "security_groups", } - return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items() - if str(attr_id).startswith(prefix)], 'ip_address') + return first_or_default( + [attr_name for (prefix, attr_name) in attr_by_prefix.items() if str(attr_id).startswith(prefix)], "ip_address" + ) def first_or_default(items, default=None): @@ -335,7 +336,7 @@ def has_targets(available, required): Helper method to determine if mount target requested already exists """ grouped = group_list_of_dict(available) - for (value, field) in required: + for value, field in required: if field not in grouped or value not in grouped[field]: return False return True @@ -358,35 +359,34 @@ def main(): """ argument_spec = dict( id=dict(), - name=dict(aliases=['creation_token']), + name=dict(aliases=["creation_token"]), tags=dict(type="dict", 
default={}), - targets=dict(type="list", default=[], elements='str') + targets=dict(type="list", default=[], elements="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) connection = EFSConnection(module) - name = module.params.get('name') - fs_id = module.params.get('id') - tags = module.params.get('tags') - targets = module.params.get('targets') + name = module.params.get("name") + fs_id = module.params.get("id") + tags = module.params.get("tags") + targets = module.params.get("targets") file_systems_info = connection.get_file_systems(fs_id, name) if tags: - file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)] + file_systems_info = [item for item in file_systems_info if has_tags(item["tags"], tags)] file_systems_info = connection.get_mount_targets_data(file_systems_info) file_systems_info = connection.get_security_groups_data(file_systems_info) if targets: targets = [(item, prefix_to_attr(item)) for item in targets] - file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)] + file_systems_info = [item for item in file_systems_info if has_targets(item["mount_targets"], targets)] module.exit_json(changed=False, efs=file_systems_info) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/efs_tag.py b/ansible_collections/community/aws/plugins/modules/efs_tag.py index 1529fa944..0f5143471 100644 --- a/ansible_collections/community/aws/plugins/modules/efs_tag.py +++ b/ansible_collections/community/aws/plugins/modules/efs_tag.py @@ -1,21 +1,17 @@ #!/usr/bin/python -""" -Copyright: (c) 2021, Milan Zink <zeten30@gmail.com> -GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -""" +# -*- coding: utf-8 -*- -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# Copyright: (c) 2021, Milan Zink <zeten30@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: efs_tag version_added: 2.0.0 short_description: create and remove tags on Amazon EFS resources description: - - Creates and removes tags for Amazon EFS resources. - - Resources are referenced by their ID (filesystem or filesystem access point). + - Creates and removes tags for Amazon EFS resources. + - Resources are referenced by their ID (filesystem or filesystem access point). 
author: - Milan Zink (@zeten30) options: @@ -44,13 +40,12 @@ options: type: bool default: false extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure tags are present on a resource community.aws.efs_tag: resource: fs-123456ab @@ -71,7 +66,7 @@ EXAMPLES = r''' resource: fsap-78945ff state: absent tags: - Name: foo + Name: foo purge_tags: true - name: Remove all tags @@ -80,9 +75,9 @@ EXAMPLES = r''' state: absent tags: {} purge_tags: true -''' +""" -RETURN = r''' +RETURN = r""" tags: description: A dict containing the tags on the resource returned: always @@ -95,51 +90,56 @@ removed_tags: description: A dict of tags that were removed from the resource returned: If tags were removed type: dict -''' +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: # Handled by AnsibleAWSModule pass -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags, AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule MAX_AWS_RETRIES = 10 # How many retries to perform when an API call is failing WAIT_RETRY = 5 # how many seconds to wait between propagation status polls def get_tags(efs, module, resource): - ''' + """ Get resource tags - ''' + """ try: - return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)['Tags']) + return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)["Tags"]) except (BotoCoreError, ClientError) as get_tags_error: - module.fail_json_aws(get_tags_error, msg='Failed to fetch tags for resource {0}'.format(resource)) + module.fail_json_aws(get_tags_error, msg=f"Failed to fetch tags for resource {resource}") def main(): - ''' + """ MAIN - ''' + """ argument_spec = dict( resource=dict(required=True), - tags=dict(type='dict', required=True, aliases=['resource_tags']), - purge_tags=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']) + tags=dict(type="dict", required=True, aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - resource = module.params['resource'] - tags = module.params['tags'] - state = module.params['state'] - purge_tags = module.params['purge_tags'] + resource = module.params["resource"] + tags = module.params["tags"] + state = module.params["state"] + purge_tags = module.params["purge_tags"] - result = {'changed': False} + result = {"changed": False} - efs = module.client('efs', retry_decorator=AWSRetry.jittered_backoff()) + efs = module.client("efs", 
retry_decorator=AWSRetry.jittered_backoff()) current_tags = get_tags(efs, module, resource) @@ -147,7 +147,7 @@ def main(): remove_tags = {} - if state == 'absent': + if state == "absent": for key in tags: if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): remove_tags[key] = current_tags[key] @@ -156,28 +156,30 @@ def main(): remove_tags[key] = current_tags[key] if remove_tags: - result['changed'] = True - result['removed_tags'] = remove_tags + result["changed"] = True + result["removed_tags"] = remove_tags if not module.check_mode: try: efs.untag_resource(aws_retry=True, ResourceId=resource, TagKeys=list(remove_tags.keys())) except (BotoCoreError, ClientError) as remove_tag_error: - module.fail_json_aws(remove_tag_error, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource)) + module.fail_json_aws( + remove_tag_error, msg=f"Failed to remove tags {remove_tags} from resource {resource}" + ) - if state == 'present' and add_tags: - result['changed'] = True - result['added_tags'] = add_tags + if state == "present" and add_tags: + result["changed"] = True + result["added_tags"] = add_tags current_tags.update(add_tags) if not module.check_mode: try: tags = ansible_dict_to_boto3_tag_list(add_tags) efs.tag_resource(aws_retry=True, ResourceId=resource, Tags=tags) except (BotoCoreError, ClientError) as set_tag_error: - module.fail_json_aws(set_tag_error, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource)) + module.fail_json_aws(set_tag_error, msg=f"Failed to set tags {add_tags} on resource {resource}") - result['tags'] = get_tags(efs, module, resource) + result["tags"] = get_tags(efs, module, resource) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/eks_cluster.py b/ansible_collections/community/aws/plugins/modules/eks_cluster.py index 18a5055e9..a445def55 100644 --- a/ansible_collections/community/aws/plugins/modules/eks_cluster.py +++ b/ansible_collections/community/aws/plugins/modules/eks_cluster.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: eks_cluster version_added: 1.0.0 @@ -63,13 +61,12 @@ options: default: 1200 type: int extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Create an EKS cluster @@ -89,9 +86,9 @@ EXAMPLES = r''' name: my_cluster wait: true state: absent -''' +""" -RETURN = r''' +RETURN = r""" arn: description: ARN of the EKS cluster returned: when state is present @@ -163,42 +160,45 @@ version: returned: when state is present type: str sample: '1.10' -''' - - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +""" try: - import botocore.exceptions + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def ensure_present(client, module): - name = module.params.get('name') - subnets = module.params['subnets'] - groups = module.params['security_groups'] - wait = module.params.get('wait') + name = module.params.get("name") + subnets = module.params["subnets"] + groups = module.params["security_groups"] + wait = module.params.get("wait") cluster = get_cluster(client, module) try: - ec2 = module.client('ec2') - vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])['Subnets'][0]['VpcId'] + ec2 = module.client("ec2") + vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])["Subnets"][0]["VpcId"] groups = get_ec2_security_group_ids_from_names(groups, ec2, vpc_id) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't lookup security groups") if cluster: - if set(cluster['resourcesVpcConfig']['subnetIds']) != set(subnets): + if set(cluster["resourcesVpcConfig"]["subnetIds"]) != set(subnets): module.fail_json(msg="Cannot modify subnets of existing cluster") - if set(cluster['resourcesVpcConfig']['securityGroupIds']) != set(groups): + if set(cluster["resourcesVpcConfig"]["securityGroupIds"]) != set(groups): module.fail_json(msg="Cannot modify security groups of existing cluster") - if module.params.get('version') and module.params.get('version') != cluster['version']: + if module.params.get("version") and module.params.get("version") != cluster["version"]: module.fail_json(msg="Cannot modify version of existing cluster") if wait: - wait_until(client, module, 'cluster_active') + wait_until(client, module, "cluster_active") # Ensure that fields that are only available for active clusters are # included in the returned value cluster = get_cluster(client, module) @@ -208,24 +208,23 @@ def ensure_present(client, module): if module.check_mode: module.exit_json(changed=True) try: - params = dict(name=name, - roleArn=module.params['role_arn'], - resourcesVpcConfig=dict( - subnetIds=subnets, - securityGroupIds=groups), - ) - if module.params['version']: - params['version'] = module.params['version'] - if module.params['tags']: - params['tags'] = module.params['tags'] - cluster = client.create_cluster(**params)['cluster'] + params = dict( + name=name, + roleArn=module.params["role_arn"], + 
resourcesVpcConfig=dict(subnetIds=subnets, securityGroupIds=groups), + ) + if module.params["version"]: + params["version"] = module.params["version"] + if module.params["tags"]: + params["tags"] = module.params["tags"] + cluster = client.create_cluster(**params)["cluster"] except botocore.exceptions.EndpointConnectionError as e: - module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) + module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create cluster %s" % name) + module.fail_json_aws(e, msg=f"Couldn't create cluster {name}") if wait: - wait_until(client, module, 'cluster_active') + wait_until(client, module, "cluster_active") # Ensure that fields that are only available for active clusters are # included in the returned value cluster = get_cluster(client, module) @@ -234,44 +233,47 @@ def ensure_present(client, module): def ensure_absent(client, module): - name = module.params.get('name') + name = module.params.get("name") existing = get_cluster(client, module) - wait = module.params.get('wait') + wait = module.params.get("wait") if not existing: module.exit_json(changed=False) if not module.check_mode: try: - client.delete_cluster(name=module.params['name']) + client.delete_cluster(name=module.params["name"]) except botocore.exceptions.EndpointConnectionError as e: - module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) + module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete cluster %s" % name) + module.fail_json_aws(e, msg=f"Couldn't delete cluster {name}") if wait: - wait_until(client, module, 'cluster_deleted') + wait_until(client, module, "cluster_deleted") module.exit_json(changed=True) def get_cluster(client, module): - name = module.params.get('name') + name = module.params.get("name") try: - return client.describe_cluster(name=name)['cluster'] - except is_boto3_error_code('ResourceNotFoundException'): + return client.describe_cluster(name=name)["cluster"] + except is_boto3_error_code("ResourceNotFoundException"): return None except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except - module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get cluster %s" % name) + module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Couldn't get cluster {name}") -def wait_until(client, module, waiter_name='cluster_active'): - name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') +def wait_until(client, module, waiter_name="cluster_active"): + name = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") waiter = get_waiter(client, waiter_name) attempts = 1 + int(wait_timeout / waiter.config.delay) - waiter.wait(name=name, WaiterConfig={'MaxAttempts': attempts}) + waiter.wait(name=name, WaiterConfig={"MaxAttempts": attempts}) def main(): @@ -279,27 +281,27 @@ def main(): 
name=dict(required=True), version=dict(), role_arn=dict(), - subnets=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - state=dict(choices=['absent', 'present'], default='present'), - tags=dict(type='dict', required=False), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=1200, type='int') + subnets=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + state=dict(choices=["absent", "present"], default="present"), + tags=dict(type="dict", required=False), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=1200, type="int"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['role_arn', 'subnets', 'security_groups']]], + required_if=[["state", "present", ["role_arn", "subnets", "security_groups"]]], supports_check_mode=True, ) - client = module.client('eks') + client = module.client("eks") - if module.params.get('state') == 'present': + if module.params.get("state") == "present": ensure_present(client, module) else: ensure_absent(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py b/ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py index d78cbbe2d..131f0651b 100644 --- a/ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py +++ b/ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2022 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: eks_fargate_profile version_added: 4.0.0 @@ -68,14 +66,13 @@ options: default: 1200 type: int extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags + - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create an EKS Fargate Profile @@ -98,9 +95,9 @@ EXAMPLES = r''' cluster_name: test_cluster wait: true state: absent -''' +""" -RETURN = r''' +RETURN = r""" fargate_profile_name: description: Name of Fargate Profile. 
returned: when state is present @@ -164,74 +161,77 @@ status: sample: - CREATING - ACTIVE -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +""" try: - import botocore.exceptions + import botocore except ImportError: pass +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def validate_tags(client, module, fargate_profile): changed = False - desired_tags = module.params.get('tags') + desired_tags = module.params.get("tags") if desired_tags is None: return False try: - existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile['fargateProfileArn'])['tags'] - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags')) + existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile["fargateProfileArn"])["tags"] + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to list or compare tags for Fargate Profile %s' % module.params.get('name')) + module.fail_json_aws(e, msg=f"Unable to list or compare tags for Fargate Profile {module.params.get('name')}") if tags_to_remove: changed = True if not module.check_mode: try: - client.untag_resource(resourceArn=fargate_profile['fargateProfileArn'], tagKeys=tags_to_remove) + client.untag_resource(resourceArn=fargate_profile["fargateProfileArn"], tagKeys=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name')) + module.fail_json_aws(e, msg=f"Unable to set tags for Fargate Profile {module.params.get('name')}") if tags_to_add: changed = True if not module.check_mode: try: - client.tag_resource(resourceArn=fargate_profile['fargateProfileArn'], tags=tags_to_add) + client.tag_resource(resourceArn=fargate_profile["fargateProfileArn"], tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name')) + module.fail_json_aws(e, msg=f"Unable to set tags for Fargate Profile {module.params.get('name')}") return changed def create_or_update_fargate_profile(client, module): - name = module.params.get('name') - subnets = module.params['subnets'] - role_arn = module.params['role_arn'] - cluster_name = module.params['cluster_name'] - selectors = module.params['selectors'] - tags = module.params['tags'] or {} - wait = module.params.get('wait') + name = module.params.get("name") + subnets = module.params["subnets"] + role_arn = module.params["role_arn"] + cluster_name = module.params["cluster_name"] + 
selectors = module.params["selectors"] + tags = module.params["tags"] or {} + wait = module.params.get("wait") fargate_profile = get_fargate_profile(client, module, name, cluster_name) if fargate_profile: changed = False - if set(fargate_profile['podExecutionRoleArn']) != set(role_arn): + if set(fargate_profile["podExecutionRoleArn"]) != set(role_arn): module.fail_json(msg="Cannot modify Execution Role") - if set(fargate_profile['subnets']) != set(subnets): + if set(fargate_profile["subnets"]) != set(subnets): module.fail_json(msg="Cannot modify Subnets") - if fargate_profile['selectors'] != selectors: + if fargate_profile["selectors"] != selectors: module.fail_json(msg="Cannot modify Selectors") changed = validate_tags(client, module, fargate_profile) if wait: - wait_until(client, module, 'fargate_profile_active', name, cluster_name) + wait_until(client, module, "fargate_profile_active", name, cluster_name) fargate_profile = get_fargate_profile(client, module, name, cluster_name) module.exit_json(changed=changed, **camel_dict_to_snake_dict(fargate_profile)) @@ -242,29 +242,30 @@ def create_or_update_fargate_profile(client, module): check_profiles_status(client, module, cluster_name) try: - params = dict(fargateProfileName=name, - podExecutionRoleArn=role_arn, - subnets=subnets, - clusterName=cluster_name, - selectors=selectors, - tags=tags - ) + params = dict( + fargateProfileName=name, + podExecutionRoleArn=role_arn, + subnets=subnets, + clusterName=cluster_name, + selectors=selectors, + tags=tags, + ) fargate_profile = client.create_fargate_profile(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create fargate profile %s" % name) + module.fail_json_aws(e, msg=f"Couldn't create fargate profile {name}") if wait: - wait_until(client, module, 'fargate_profile_active', name, cluster_name) + wait_until(client, module, "fargate_profile_active", name, cluster_name) fargate_profile = get_fargate_profile(client, module, name, cluster_name) module.exit_json(changed=True, **camel_dict_to_snake_dict(fargate_profile)) def delete_fargate_profile(client, module): - name = module.params.get('name') - cluster_name = module.params['cluster_name'] + name = module.params.get("name") + cluster_name = module.params["cluster_name"] existing = get_fargate_profile(client, module, name, cluster_name) - wait = module.params.get('wait') + wait = module.params.get("wait") if not existing or existing["status"] == "DELETING": module.exit_json(changed=False) @@ -273,20 +274,23 @@ def delete_fargate_profile(client, module): try: client.delete_fargate_profile(clusterName=cluster_name, fargateProfileName=name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete fargate profile %s" % name) + module.fail_json_aws(e, msg=f"Couldn't delete fargate profile {name}") if wait: - wait_until(client, module, 'fargate_profile_deleted', name, cluster_name) + wait_until(client, module, "fargate_profile_deleted", name, cluster_name) module.exit_json(changed=True) def get_fargate_profile(client, module, name, cluster_name): try: - return client.describe_fargate_profile(clusterName=cluster_name, fargateProfileName=name)['fargateProfile'] - except is_boto3_error_code('ResourceNotFoundException'): + return client.describe_fargate_profile(clusterName=cluster_name, fargateProfileName=name)["fargateProfile"] + except is_boto3_error_code("ResourceNotFoundException"): return None - 
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+    except (
+        botocore.exceptions.BotoCoreError,
+        botocore.exceptions.ClientError,
+    ) as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e, msg="Couldn't get fargate profile")
 
 
@@ -297,20 +301,24 @@ def check_profiles_status(client, module, cluster_name):
 
         for name in list_profiles["fargateProfileNames"]:
             fargate_profile = get_fargate_profile(client, module, name, cluster_name)
-            if fargate_profile["status"] == 'CREATING':
-                wait_until(client, module, 'fargate_profile_active', fargate_profile["fargateProfileName"], cluster_name)
-            elif fargate_profile["status"] == 'DELETING':
-                wait_until(client, module, 'fargate_profile_deleted', fargate_profile["fargateProfileName"], cluster_name)
+            if fargate_profile["status"] == "CREATING":
+                wait_until(
+                    client, module, "fargate_profile_active", fargate_profile["fargateProfileName"], cluster_name
+                )
+            elif fargate_profile["status"] == "DELETING":
+                wait_until(
+                    client, module, "fargate_profile_deleted", fargate_profile["fargateProfileName"], cluster_name
+                )
     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
         module.fail_json_aws(e, msg="Couldn't find EKS cluster")
 
 
 def wait_until(client, module, waiter_name, name, cluster_name):
-    wait_timeout = module.params.get('wait_timeout')
+    wait_timeout = module.params.get("wait_timeout")
 
     waiter = get_waiter(client, waiter_name)
     attempts = 1 + int(wait_timeout / waiter.config.delay)
     try:
-        waiter.wait(clusterName=cluster_name, fargateProfileName=name, WaiterConfig={'MaxAttempts': attempts})
+        waiter.wait(clusterName=cluster_name, fargateProfileName=name, WaiterConfig={"MaxAttempts": attempts})
     except botocore.exceptions.WaiterError as e:
         module.fail_json_aws(e, msg="An error occurred waiting")
 
 
@@ -320,34 +328,38 @@ def main():
         name=dict(required=True),
         cluster_name=dict(required=True),
         role_arn=dict(),
-        subnets=dict(type='list', elements='str'),
-        selectors=dict(type='list', elements='dict', options=dict(
-            namespace=dict(type='str'),
-            labels=dict(type='dict', default={})
-        )),
-        tags=dict(type='dict', aliases=['resource_tags']),
-        purge_tags=dict(type='bool', default=True),
-        state=dict(choices=['absent', 'present'], default='present'),
-        wait=dict(default=False, type='bool'),
-        wait_timeout=dict(default=1200, type='int')
+        subnets=dict(type="list", elements="str"),
+        selectors=dict(
+            type="list",
+            elements="dict",
+            options=dict(
+                namespace=dict(type="str"),
+                labels=dict(type="dict", default={}),
+            ),
+        ),
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(type="bool", default=True),
+        state=dict(choices=["absent", "present"], default="present"),
+        wait=dict(default=False, type="bool"),
+        wait_timeout=dict(default=1200, type="int"),
     )
 
     module = AnsibleAWSModule(
         argument_spec=argument_spec,
-        required_if=[['state', 'present', ['role_arn', 'subnets', 'selectors']]],
+        required_if=[["state", "present", ["role_arn", "subnets", "selectors"]]],
         supports_check_mode=True,
     )
 
     try:
-        client = module.client('eks')
+        client = module.client("eks")
     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
         module.fail_json_aws(e, msg="Couldn't connect to AWS")
 
-    if module.params.get('state') == 'present':
+    if module.params.get("state") == "present":
         create_or_update_fargate_profile(client, module)
     else:
         delete_fargate_profile(client, module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git
a/ansible_collections/community/aws/plugins/modules/eks_nodegroup.py b/ansible_collections/community/aws/plugins/modules/eks_nodegroup.py index 78979afc2..f9bbb7857 100644 --- a/ansible_collections/community/aws/plugins/modules/eks_nodegroup.py +++ b/ansible_collections/community/aws/plugins/modules/eks_nodegroup.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2022 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: eks_nodegroup version_added: 5.3.0 @@ -169,12 +167,11 @@ options: default: 1200 type: int extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: create nodegroup @@ -187,29 +184,29 @@ EXAMPLES = r''' - subnet-qwerty123 - subnet-asdfg456 scaling_config: - - min_size: 1 - - max_size: 2 - - desired_size: 1 + min_size: 1 + max_size: 2 + desired_size: 1 disk_size: 20 instance_types: 't3.micro' ami_type: 'AL2_x86_64' labels: - - 'teste': 'test' + 'teste': 'test' taints: - key: 'test' value: 'test' effect: 'NO_SCHEDULE' - capacity_type: 'on_demand' + capacity_type: 'ON_DEMAND' - name: Remove an EKS Nodegrop community.aws.eks_nodegroup: name: test_nodegroup cluster_name: test_cluster - wait: yes + wait: true state: absent -''' +""" -RETURN = r''' +RETURN = r""" nodegroup_name: description: The name associated with an Amazon EKS managed node group. returned: when state is present @@ -345,45 +342,49 @@ tags: type: dict sample: foo: bar -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +""" try: - import botocore.exceptions + import botocore except ImportError: pass +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def validate_tags(client, module, nodegroup): changed = False - desired_tags = module.params.get('tags') + desired_tags = module.params.get("tags") if desired_tags is None: return False try: - existing_tags = client.list_tags_for_resource(resourceArn=nodegroup['nodegroupArn'])['tags'] - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags')) + existing_tags = client.list_tags_for_resource(resourceArn=nodegroup["nodegroupArn"])["tags"] + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) 
as e:
-        module.fail_json_aws(e, msg='Unable to list or compare tags for Nodegroup %s.' % module.params.get('name'))
+        module.fail_json_aws(e, msg=f"Unable to list or compare tags for Nodegroup {module.params.get('name')}.")
 
     if tags_to_remove:
         if not module.check_mode:
             changed = True
             try:
-                client.untag_resource(aws_retry=True, ResourceArn=nodegroup['nodegroupArn'], tagKeys=tags_to_remove)
+                client.untag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tagKeys=tags_to_remove)
             except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-                module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' % module.params.get('name'))
+                module.fail_json_aws(e, msg=f"Unable to set tags for Nodegroup {module.params.get('name')}.")
 
     if tags_to_add:
         if not module.check_mode:
             changed = True
             try:
-                client.tag_resource(aws_retry=True, ResourceArn=nodegroup['nodegroupArn'], tags=tags_to_add)
+                client.tag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tags=tags_to_add)
             except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-                module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' % module.params.get('name'))
+                module.fail_json_aws(e, msg=f"Unable to set tags for Nodegroup {module.params.get('name')}.")
 
     return changed
 
 
@@ -404,24 +405,24 @@ def compare_taints(nodegroup_taints, param_taints):
 def validate_taints(client, module, nodegroup, param_taints):
     changed = False
     params = dict()
-    params['clusterName'] = nodegroup['clusterName']
-    params['nodegroupName'] = nodegroup['nodegroupName']
-    params['taints'] = []
-    if 'taints' not in nodegroup:
-        nodegroup['taints'] = []
-    taints_to_add_or_update, taints_to_unset = compare_taints(nodegroup['taints'], param_taints)
+    params["clusterName"] = nodegroup["clusterName"]
+    params["nodegroupName"] = nodegroup["nodegroupName"]
+    params["taints"] = {}
+    if "taints" not in nodegroup:
+        nodegroup["taints"] = []
+    taints_to_add_or_update, taints_to_unset = compare_taints(nodegroup["taints"], param_taints)
 
     if taints_to_add_or_update:
-        params['taints']['addOrUpdateTaints'] = taints_to_add_or_update
+        params["taints"]["addOrUpdateTaints"] = taints_to_add_or_update
     if taints_to_unset:
-        params['taints']['removeTaints'] = taints_to_unset
-    if params['taints']:
+        params["taints"]["removeTaints"] = taints_to_unset
+    if params["taints"]:
         if not module.check_mode:
             changed = True
             try:
                 client.update_nodegroup_config(**params)
             except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-                module.fail_json_aws(e, msg='Unable to set taints for Nodegroup %s.' % params['nodegroupName'])
+                module.fail_json_aws(e, msg=f"Unable to set taints for Nodegroup {params['nodegroupName']}.")
 
     return changed
 
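# For reference, a minimal sketch of the comparison contract that
# compare_taints() (whose body is not shown in this hunk) is assumed to
# satisfy: it returns the lists sent above as addOrUpdateTaints and
# removeTaints. Taints are dicts with "key"/"value"/"effect" keys, per this
# module's argument spec; the helper name here is hypothetical.
def compare_taints_sketch(nodegroup_taints, param_taints):
    # Desired taints that are missing from, or differ on, the nodegroup.
    taints_to_add_or_update = [taint for taint in param_taints if taint not in nodegroup_taints]
    # Existing taints whose key no longer appears in the desired list.
    desired_keys = [taint.get("key") for taint in param_taints]
    taints_to_unset = [taint for taint in nodegroup_taints if taint.get("key") not in desired_keys]
    return taints_to_add_or_update, taints_to_unset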
@@ -442,109 +443,114 @@ def compare_labels(nodegroup_labels, param_labels):
 def validate_labels(client, module, nodegroup, param_labels):
     changed = False
     params = dict()
-    params['clusterName'] = nodegroup['clusterName']
-    params['nodegroupName'] = nodegroup['nodegroupName']
-    params['labels'] = {}
-    labels_to_add_or_update, labels_to_unset = compare_labels(nodegroup['labels'], param_labels)
+    params["clusterName"] = nodegroup["clusterName"]
+    params["nodegroupName"] = nodegroup["nodegroupName"]
+    params["labels"] = {}
+    labels_to_add_or_update, labels_to_unset = compare_labels(nodegroup["labels"], param_labels)
 
     if labels_to_add_or_update:
-        params['labels']['addOrUpdateLabels'] = labels_to_add_or_update
+        params["labels"]["addOrUpdateLabels"] = labels_to_add_or_update
     if labels_to_unset:
-        params['labels']['removeLabels'] = labels_to_unset
-    if params['labels']:
+        params["labels"]["removeLabels"] = labels_to_unset
+    if params["labels"]:
         if not module.check_mode:
             changed = True
             try:
                 client.update_nodegroup_config(**params)
             except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-                module.fail_json_aws(e, msg='Unable to set labels for Nodegroup %s.' % params['nodegroupName'])
+                module.fail_json_aws(e, msg=f"Unable to set labels for Nodegroup {params['nodegroupName']}.")
 
     return changed
 
 
 def compare_params(module, params, nodegroup):
-    for param in ['nodeRole', 'subnets', 'diskSize', 'instanceTypes', 'amiTypes', 'remoteAccess', 'capacityType']:
+    for param in ["nodeRole", "subnets", "diskSize", "instanceTypes", "amiType", "remoteAccess", "capacityType"]:
         if (param in nodegroup) and (param in params):
-            if (nodegroup[param] != params[param]):
-                module.fail_json(msg="Cannot modify parameter %s." % param)
-    if ('launchTemplate' not in nodegroup) and ('launchTemplate' in params):
+            if nodegroup[param] != params[param]:
+                module.fail_json(msg=f"Cannot modify parameter {param}.")
+    if ("launchTemplate" not in nodegroup) and ("launchTemplate" in params):
         module.fail_json(msg="Cannot add Launch Template in this Nodegroup.")
-    if nodegroup['updateConfig'] != params['updateConfig']:
+    if nodegroup["updateConfig"] != params["updateConfig"]:
         return True
-    if nodegroup['scalingConfig'] != params['scalingConfig']:
+    if nodegroup["scalingConfig"] != params["scalingConfig"]:
         return True
     return False
 
 
 def compare_params_launch_template(module, params, nodegroup):
-    if 'launchTemplate' not in params:
+    if "launchTemplate" not in params:
         module.fail_json(msg="Cannot exclude Launch Template in this Nodegroup.")
     else:
-        for key in ['name', 'id']:
-            if (key in params['launchTemplate']) and (params['launchTemplate'][key] != nodegroup['launchTemplate'][key]):
-                module.fail_json(msg="Cannot modify Launch Template %s."
% key) - if ('version' in params['launchTemplate']) and (params['launchTemplate']['version'] != nodegroup['launchTemplate']['version']): + for key in ["name", "id"]: + if (key in params["launchTemplate"]) and ( + params["launchTemplate"][key] != nodegroup["launchTemplate"][key] + ): + module.fail_json(msg=f"Cannot modify Launch Template {key}.") + if ("version" in params["launchTemplate"]) and ( + params["launchTemplate"]["version"] != nodegroup["launchTemplate"]["version"] + ): return True return False def create_or_update_nodegroups(client, module): - changed = False params = dict() - params['nodegroupName'] = module.params['name'] - params['clusterName'] = module.params['cluster_name'] - params['nodeRole'] = module.params['node_role'] - params['subnets'] = module.params['subnets'] - params['tags'] = module.params['tags'] or {} - if module.params['ami_type'] is not None: - params['amiType'] = module.params['ami_type'] - if module.params['disk_size'] is not None: - params['diskSize'] = module.params['disk_size'] - if module.params['instance_types'] is not None: - params['instanceTypes'] = module.params['instance_types'] - if module.params['launch_template'] is not None: - params['launchTemplate'] = dict() - if module.params['launch_template']['id'] is not None: - params['launchTemplate']['id'] = module.params['launch_template']['id'] - if module.params['launch_template']['version'] is not None: - params['launchTemplate']['version'] = module.params['launch_template']['version'] - if module.params['launch_template']['name'] is not None: - params['launchTemplate']['name'] = module.params['launch_template']['name'] - if module.params['release_version'] is not None: - params['releaseVersion'] = module.params['release_version'] - if module.params['remote_access'] is not None: - params['remoteAccess'] = dict() - if module.params['remote_access']['ec2_ssh_key'] is not None: - params['remoteAccess']['ec2SshKey'] = module.params['remote_access']['ec2_ssh_key'] - if module.params['remote_access']['source_sg'] is not None: - params['remoteAccess']['sourceSecurityGroups'] = module.params['remote_access']['source_sg'] - if module.params['capacity_type'] is not None: - params['capacityType'] = module.params['capacity_type'].upper() - if module.params['labels'] is not None: - params['labels'] = module.params['labels'] - if module.params['taints'] is not None: - params['taints'] = module.params['taints'] - if module.params['update_config'] is not None: - params['updateConfig'] = dict() - if module.params['update_config']['max_unavailable'] is not None: - params['updateConfig']['maxUnavailable'] = module.params['update_config']['max_unavailable'] - if module.params['update_config']['max_unavailable_percentage'] is not None: - params['updateConfig']['maxUnavailablePercentage'] = module.params['update_config']['max_unavailable_percentage'] - if module.params['scaling_config'] is not None: - params['scalingConfig'] = snake_dict_to_camel_dict(module.params['scaling_config']) - - wait = module.params.get('wait') - nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + params["nodegroupName"] = module.params["name"] + params["clusterName"] = module.params["cluster_name"] + params["nodeRole"] = module.params["node_role"] + params["subnets"] = module.params["subnets"] + params["tags"] = module.params["tags"] or {} + if module.params["ami_type"] is not None: + params["amiType"] = module.params["ami_type"] + if module.params["disk_size"] is not None: + params["diskSize"] = 
module.params["disk_size"] + if module.params["instance_types"] is not None: + params["instanceTypes"] = module.params["instance_types"] + if module.params["launch_template"] is not None: + params["launchTemplate"] = dict() + if module.params["launch_template"]["id"] is not None: + params["launchTemplate"]["id"] = module.params["launch_template"]["id"] + if module.params["launch_template"]["version"] is not None: + params["launchTemplate"]["version"] = module.params["launch_template"]["version"] + if module.params["launch_template"]["name"] is not None: + params["launchTemplate"]["name"] = module.params["launch_template"]["name"] + if module.params["release_version"] is not None: + params["releaseVersion"] = module.params["release_version"] + if module.params["remote_access"] is not None: + params["remoteAccess"] = dict() + if module.params["remote_access"]["ec2_ssh_key"] is not None: + params["remoteAccess"]["ec2SshKey"] = module.params["remote_access"]["ec2_ssh_key"] + if module.params["remote_access"]["source_sg"] is not None: + params["remoteAccess"]["sourceSecurityGroups"] = module.params["remote_access"]["source_sg"] + if module.params["capacity_type"] is not None: + params["capacityType"] = module.params["capacity_type"].upper() + if module.params["labels"] is not None: + params["labels"] = module.params["labels"] + if module.params["taints"] is not None: + params["taints"] = module.params["taints"] + if module.params["update_config"] is not None: + params["updateConfig"] = dict() + if module.params["update_config"]["max_unavailable"] is not None: + params["updateConfig"]["maxUnavailable"] = module.params["update_config"]["max_unavailable"] + if module.params["update_config"]["max_unavailable_percentage"] is not None: + params["updateConfig"]["maxUnavailablePercentage"] = module.params["update_config"][ + "max_unavailable_percentage" + ] + if module.params["scaling_config"] is not None: + params["scalingConfig"] = snake_dict_to_camel_dict(module.params["scaling_config"]) + + wait = module.params.get("wait") + nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) if nodegroup: update_params = dict() - update_params['clusterName'] = params['clusterName'] - update_params['nodegroupName'] = params['nodegroupName'] + update_params["clusterName"] = params["clusterName"] + update_params["nodegroupName"] = params["nodegroupName"] - if 'launchTemplate' in nodegroup: + if "launchTemplate" in nodegroup: if compare_params_launch_template(module, params, nodegroup): - update_params['launchTemplate'] = params['launchTemplate'] + update_params["launchTemplate"] = params["launchTemplate"] if not module.check_mode: try: client.update_nodegroup_version(**update_params) @@ -554,10 +560,10 @@ def create_or_update_nodegroups(client, module): if compare_params(module, params, nodegroup): try: - if 'launchTemplate' in update_params: - update_params.pop('launchTemplate') - update_params['scalingConfig'] = params['scalingConfig'] - update_params['updateConfig'] = params['updateConfig'] + if "launchTemplate" in update_params: + update_params.pop("launchTemplate") + update_params["scalingConfig"] = params["scalingConfig"] + update_params["updateConfig"] = params["updateConfig"] if not module.check_mode: client.update_nodegroup_config(**update_params) @@ -569,15 +575,15 @@ def create_or_update_nodegroups(client, module): changed |= validate_tags(client, module, nodegroup) - changed |= validate_labels(client, module, nodegroup, params['labels']) + changed |= 
validate_labels(client, module, nodegroup, params["labels"]) - if 'taints' in nodegroup: - changed |= validate_taints(client, module, nodegroup, params['taints']) + if "taints" in nodegroup: + changed |= validate_taints(client, module, nodegroup, params["taints"]) if wait: - wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName']) + wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"]) - nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) module.exit_json(changed=changed, **camel_dict_to_snake_dict(nodegroup)) @@ -587,127 +593,172 @@ def create_or_update_nodegroups(client, module): try: nodegroup = client.create_nodegroup(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create Nodegroup %s." % params['nodegroupName']) + module.fail_json_aws(e, msg=f"Couldn't create Nodegroup {params['nodegroupName']}.") if wait: - wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName']) - nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"]) + nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) module.exit_json(changed=True, **camel_dict_to_snake_dict(nodegroup)) def delete_nodegroups(client, module): - name = module.params.get('name') - clusterName = module.params['cluster_name'] + name = module.params.get("name") + clusterName = module.params["cluster_name"] existing = get_nodegroup(client, module, name, clusterName) - wait = module.params.get('wait') - if not existing or existing['status'] == 'DELETING': - module.exit_json(changed=False, msg='Nodegroup not exists or in DELETING status.') - if not module.check_mode: - try: - client.delete_nodegroup(clusterName=clusterName, nodegroupName=name) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete Nodegroup %s." 
% name)
+    wait = module.params.get("wait")
+    if not existing:
+        module.exit_json(changed=False, msg=f"Nodegroup '{name}' does not exist")
+
+    if existing["status"] == "DELETING":
         if wait:
-            wait_until(client, module, 'nodegroup_deleted', name, clusterName)
+            wait_until(client, module, "nodegroup_deleted", name, clusterName)
+            module.exit_json(changed=False, msg=f"Nodegroup '{name}' deletion complete")
+        module.exit_json(changed=False, msg=f"Nodegroup '{name}' already in DELETING state")
+
+    if module.check_mode:
+        module.exit_json(changed=True, msg=f"Nodegroup '{name}' deletion would be started (check mode)")
+
+    try:
+        client.delete_nodegroup(clusterName=clusterName, nodegroupName=name)
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg=f"Couldn't delete Nodegroup '{name}'.")
+
+    if wait:
+        wait_until(client, module, "nodegroup_deleted", name, clusterName)
+        module.exit_json(changed=True, msg=f"Nodegroup '{name}' deletion complete")
 
-    module.exit_json(changed=True)
+    module.exit_json(changed=True, msg=f"Nodegroup '{name}' deletion started")
 
 
 def get_nodegroup(client, module, nodegroup_name, cluster_name):
     try:
-        return client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)['nodegroup']
-    except is_boto3_error_code('ResourceNotFoundException'):
+        return client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)["nodegroup"]
+    except is_boto3_error_code("ResourceNotFoundException"):
         return None
-    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg="Couldn't get Nodegroup %s." % nodegroup_name)
+    except (
+        botocore.exceptions.BotoCoreError,
+        botocore.exceptions.ClientError,
+    ) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg=f"Couldn't get Nodegroup {nodegroup_name}.")
 
 
 def wait_until(client, module, waiter_name, nodegroup_name, cluster_name):
-    wait_timeout = module.params.get('wait_timeout')
+    wait_timeout = module.params.get("wait_timeout")
 
     waiter = get_waiter(client, waiter_name)
     attempts = 1 + int(wait_timeout / waiter.config.delay)
     try:
-        waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name, WaiterConfig={'MaxAttempts': attempts})
+        waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name, WaiterConfig={"MaxAttempts": attempts})
     except botocore.exceptions.WaiterError as e:
         module.fail_json_aws(e, msg="An error occurred waiting")
 
 
 def main():
     argument_spec = dict(
-        name=dict(type='str', required=True),
-        cluster_name=dict(type='str', required=True),
+        name=dict(type="str", required=True),
+        cluster_name=dict(type="str", required=True),
         node_role=dict(),
-        subnets=dict(type='list', elements='str'),
-        scaling_config=dict(type='dict', default={'min_size': 1, 'max_size': 2, 'desired_size': 1}, options=dict(
-            min_size=dict(type='int'),
-            max_size=dict(type='int'),
-            desired_size=dict(type='int')
-        )),
-        disk_size=dict(type='int'),
-        instance_types=dict(type='list', elements='str'),
-        ami_type=dict(choices=['AL2_x86_64', 'AL2_x86_64_GPU', 'AL2_ARM_64', 'CUSTOM', 'BOTTLEROCKET_ARM_64', 'BOTTLEROCKET_x86_64']),
-        remote_access=dict(type='dict', options=dict(
-            ec2_ssh_key=dict(no_log=True),
-            source_sg=dict(type='list', elements='str')
-        )),
-        update_config=dict(type='dict', default={'max_unavailable': 1}, options=dict(
-            max_unavailable=dict(type='int'),
-            max_unavailable_percentage=dict(type='int')
-        )),
-        labels=dict(type='dict', default={}),
-
taints=dict(type='list', elements='dict', default=[], options=dict( - key=dict(type='str', no_log=False,), - value=dict(type='str'), - effect=dict(type='str', choices=['NO_SCHEDULE', 'NO_EXECUTE', 'PREFER_NO_SCHEDULE']) - )), - launch_template=dict(type='dict', options=dict( - name=dict(type='str'), - version=dict(type='str'), - id=dict(type='str') - )), - capacity_type=dict(choices=['ON_DEMAND', 'SPOT'], default='ON_DEMAND'), + subnets=dict(type="list", elements="str"), + scaling_config=dict( + type="dict", + default={"min_size": 1, "max_size": 2, "desired_size": 1}, + options=dict( + min_size=dict(type="int"), + max_size=dict(type="int"), + desired_size=dict(type="int"), + ), + ), + disk_size=dict(type="int"), + instance_types=dict(type="list", elements="str"), + ami_type=dict( + choices=[ + "AL2_x86_64", + "AL2_x86_64_GPU", + "AL2_ARM_64", + "CUSTOM", + "BOTTLEROCKET_ARM_64", + "BOTTLEROCKET_x86_64", + ] + ), + remote_access=dict( + type="dict", + options=dict( + ec2_ssh_key=dict(no_log=True), + source_sg=dict(type="list", elements="str"), + ), + ), + update_config=dict( + type="dict", + default={"max_unavailable": 1}, + options=dict( + max_unavailable=dict(type="int"), + max_unavailable_percentage=dict(type="int"), + ), + ), + labels=dict(type="dict", default={}), + taints=dict( + type="list", + elements="dict", + default=[], + options=dict( + key=dict( + type="str", + no_log=False, + ), + value=dict(type="str"), + effect=dict(type="str", choices=["NO_SCHEDULE", "NO_EXECUTE", "PREFER_NO_SCHEDULE"]), + ), + ), + launch_template=dict( + type="dict", + options=dict( + name=dict(type="str"), + version=dict(type="str"), + id=dict(type="str"), + ), + ), + capacity_type=dict(choices=["ON_DEMAND", "SPOT"], default="ON_DEMAND"), release_version=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - state=dict(choices=['absent', 'present'], default='present'), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=1200, type='int') + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + state=dict(choices=["absent", "present"], default="present"), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=1200, type="int"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['node_role', 'subnets']]], + required_if=[["state", "present", ["node_role", "subnets"]]], mutually_exclusive=[ - ('launch_template', 'instance_types'), - ('launch_template', 'disk_size'), - ('launch_template', 'remote_access'), - ('launch_template', 'ami_type') + ("launch_template", "instance_types"), + ("launch_template", "disk_size"), + ("launch_template", "remote_access"), + ("launch_template", "ami_type"), ], supports_check_mode=True, ) - if module.params['launch_template'] is None: - if module.params['disk_size'] is None: - module.params['disk_size'] = 20 - if module.params['ami_type'] is None: - module.params['ami_type'] = "AL2_x86_64" - if module.params['instance_types'] is None: - module.params['instance_types'] = ["t3.medium"] + if module.params["launch_template"] is None: + if module.params["disk_size"] is None: + module.params["disk_size"] = 20 + if module.params["ami_type"] is None: + module.params["ami_type"] = "AL2_x86_64" + if module.params["instance_types"] is None: + module.params["instance_types"] = ["t3.medium"] else: - if (module.params['launch_template']['id'] is None) and (module.params['launch_template']['name'] is None): - 
module.exit_json(changed=False, msg='To use launch_template, it is necessary to inform the id or name.')
+        if (module.params["launch_template"]["id"] is None) and (module.params["launch_template"]["name"] is None):
+            module.exit_json(changed=False, msg="To use launch_template, you must specify either its id or its name.")
 
     try:
-        client = module.client('eks')
+        client = module.client("eks")
     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
         module.fail_json_aws(e, msg="Couldn't connect to AWS.")
 
-    if module.params.get('state') == 'present':
+    if module.params.get("state") == "present":
         create_or_update_nodegroups(client, module)
     else:
         delete_nodegroups(client, module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache.py b/ansible_collections/community/aws/plugins/modules/elasticache.py
index 454baafe3..d45509cb6 100644
--- a/ansible_collections/community/aws/plugins/modules/elasticache.py
+++ b/ansible_collections/community/aws/plugins/modules/elasticache.py
@@ -1,13 +1,10 @@
 #!/usr/bin/python
-#
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: elasticache
 version_added: 1.0.0
@@ -15,7 +12,8 @@ short_description: Manage cache clusters in Amazon ElastiCache
 description:
   - Manage cache clusters in Amazon ElastiCache.
   - Returns information about the specified cache cluster.
-author: "Jim Dalton (@jsdalton)"
+author:
+  - "Jim Dalton (@jsdalton)"
 options:
   state:
     description:
@@ -97,15 +95,15 @@ options:
       - Defaults to C(false).
     type: bool
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
 
-'''
+RETURN = r""" # """
 
 EXAMPLES = r"""
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
+# Note: These examples do not set authentication details, see the AWS Guide for details.
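# An illustrative sketch of a destructive change: per the sync() logic in this
# module, modifying an unmodifiable attribute such as node_type requires both
# hard_modify and wait to be true. All values here are placeholders.
- name: Change the node type (destroys and recreates the cluster)
  community.aws.elasticache:
    name: "test-please-delete"
    state: present
    engine: memcached
    node_type: cache.m3.medium
    num_nodes: 1
    hard_modify: true
    wait: true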
- name: Basic example community.aws.elasticache: @@ -113,7 +111,7 @@ EXAMPLES = r""" state: present engine: memcached cache_engine_version: 1.4.14 - node_type: cache.m1.small + node_type: cache.m3.small num_nodes: 1 cache_port: 11211 cache_security_groups: @@ -130,8 +128,8 @@ EXAMPLES = r""" community.aws.elasticache: name: "test-please-delete" state: rebooted - """ + from time import sleep try: @@ -139,21 +137,34 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -class ElastiCacheManager(object): - """Handles elasticache creation and destruction""" +class ElastiCacheManager: - EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying'] + """Handles elasticache creation and destruction""" - def __init__(self, module, name, engine, cache_engine_version, node_type, - num_nodes, cache_port, cache_parameter_group, cache_subnet_group, - cache_security_groups, security_group_ids, zone, wait, - hard_modify, region, **aws_connect_kwargs): + EXIST_STATUSES = ["available", "creating", "rebooting", "modifying"] + + def __init__( + self, + module, + name, + engine, + cache_engine_version, + node_type, + num_nodes, + cache_port, + cache_parameter_group, + cache_subnet_group, + cache_security_groups, + security_group_ids, + zone, + wait, + hard_modify, + ): self.module = module self.name = name self.engine = engine.lower() @@ -169,12 +180,9 @@ class ElastiCacheManager(object): self.wait = wait self.hard_modify = hard_modify - self.region = region - self.aws_connect_kwargs = aws_connect_kwargs - self.changed = False self.data = None - self.status = 'gone' + self.status = "gone" self.conn = self._get_elasticache_connection() self._refresh_data() @@ -199,32 +207,33 @@ class ElastiCacheManager(object): def create(self): """Create an ElastiCache cluster""" - if self.status == 'available': + if self.status == "available": return - if self.status in ['creating', 'rebooting', 'modifying']: + if self.status in ["creating", "rebooting", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") return - if self.status == 'deleting': + if self.status == "deleting": if self.wait: - self._wait_for_status('gone') + self._wait_for_status("gone") else: - msg = "'%s' is currently deleting. Cannot create." - self.module.fail_json(msg=msg % self.name) - - kwargs = dict(CacheClusterId=self.name, - NumCacheNodes=self.num_nodes, - CacheNodeType=self.node_type, - Engine=self.engine, - EngineVersion=self.cache_engine_version, - CacheSecurityGroupNames=self.cache_security_groups, - SecurityGroupIds=self.security_group_ids, - CacheParameterGroupName=self.cache_parameter_group, - CacheSubnetGroupName=self.cache_subnet_group) + self.module.fail_json(msg=f"'{self.name}' is currently deleting. 
Cannot create.") + + kwargs = dict( + CacheClusterId=self.name, + NumCacheNodes=self.num_nodes, + CacheNodeType=self.node_type, + Engine=self.engine, + EngineVersion=self.cache_engine_version, + CacheSecurityGroupNames=self.cache_security_groups, + SecurityGroupIds=self.security_group_ids, + CacheParameterGroupName=self.cache_parameter_group, + CacheSubnetGroupName=self.cache_subnet_group, + ) if self.cache_port is not None: - kwargs['Port'] = self.cache_port + kwargs["Port"] = self.cache_port if self.zone is not None: - kwargs['PreferredAvailabilityZone'] = self.zone + kwargs["PreferredAvailabilityZone"] = self.zone try: self.conn.create_cache_cluster(**kwargs) @@ -236,45 +245,43 @@ class ElastiCacheManager(object): self.changed = True if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") return True def delete(self): """Destroy an ElastiCache cluster""" - if self.status == 'gone': + if self.status == "gone": return - if self.status == 'deleting': + if self.status == "deleting": if self.wait: - self._wait_for_status('gone') + self._wait_for_status("gone") return - if self.status in ['creating', 'rebooting', 'modifying']: + if self.status in ["creating", "rebooting", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") else: - msg = "'%s' is currently %s. Cannot delete." - self.module.fail_json(msg=msg % (self.name, self.status)) + self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot delete.") try: response = self.conn.delete_cache_cluster(CacheClusterId=self.name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Failed to delete cache cluster") - cache_cluster_data = response['CacheCluster'] + cache_cluster_data = response["CacheCluster"] self._refresh_data(cache_cluster_data) self.changed = True if self.wait: - self._wait_for_status('gone') + self._wait_for_status("gone") def sync(self): """Sync settings to cluster if required""" if not self.exists(): - msg = "'%s' is %s. Cannot sync." - self.module.fail_json(msg=msg % (self.name, self.status)) + self.module.fail_json(msg=f"'{self.name}' is {self.status}. Cannot sync.") - if self.status in ['creating', 'rebooting', 'modifying']: + if self.status in ["creating", "rebooting", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") else: # Cluster can only be synced if available. If we can't wait # for this, then just be done. @@ -282,11 +289,13 @@ class ElastiCacheManager(object): if self._requires_destroy_and_create(): if not self.hard_modify: - msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed." - self.module.fail_json(msg=msg % self.name) + self.module.fail_json( + msg=f"'{self.name}' requires destructive modification. 'hard_modify' must be set to true to proceed." + ) if not self.wait: - msg = "'%s' requires destructive modification. 'wait' must be set to true." - self.module.fail_json(msg=msg % self.name) + self.module.fail_json( + msg=f"'{self.name}' requires destructive modification. 'wait' must be set to true to proceed." + ) self.delete() self.create() return @@ -298,14 +307,16 @@ class ElastiCacheManager(object): """Modify the cache cluster. 
Note it's only possible to modify a few select options.""" nodes_to_remove = self._get_nodes_to_remove() try: - self.conn.modify_cache_cluster(CacheClusterId=self.name, - NumCacheNodes=self.num_nodes, - CacheNodeIdsToRemove=nodes_to_remove, - CacheSecurityGroupNames=self.cache_security_groups, - CacheParameterGroupName=self.cache_parameter_group, - SecurityGroupIds=self.security_group_ids, - ApplyImmediately=True, - EngineVersion=self.cache_engine_version) + self.conn.modify_cache_cluster( + CacheClusterId=self.name, + NumCacheNodes=self.num_nodes, + CacheNodeIdsToRemove=nodes_to_remove, + CacheSecurityGroupNames=self.cache_security_groups, + CacheParameterGroupName=self.cache_parameter_group, + SecurityGroupIds=self.security_group_ids, + ApplyImmediately=True, + EngineVersion=self.cache_engine_version, + ) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Failed to modify cache cluster") @@ -313,27 +324,24 @@ class ElastiCacheManager(object): self.changed = True if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") def reboot(self): """Reboot the cache cluster""" if not self.exists(): - msg = "'%s' is %s. Cannot reboot." - self.module.fail_json(msg=msg % (self.name, self.status)) - if self.status == 'rebooting': + self.module.fail_json(msg=f"'{self.name}' is {self.status}. Cannot reboot.") + if self.status == "rebooting": return - if self.status in ['creating', 'modifying']: + if self.status in ["creating", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") else: - msg = "'%s' is currently %s. Cannot reboot." - self.module.fail_json(msg=msg % (self.name, self.status)) + self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot reboot.") # Collect ALL nodes for reboot - cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] + cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] try: - self.conn.reboot_cache_cluster(CacheClusterId=self.name, - CacheNodeIdsToReboot=cache_node_ids) + self.conn.reboot_cache_cluster(CacheClusterId=self.name, CacheNodeIdsToReboot=cache_node_ids) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Failed to reboot cache cluster") @@ -341,36 +349,28 @@ class ElastiCacheManager(object): self.changed = True if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") def get_info(self): """Return basic info about the cache cluster""" - info = { - 'name': self.name, - 'status': self.status - } + info = {"name": self.name, "status": self.status} if self.data: - info['data'] = self.data + info["data"] = self.data return info def _wait_for_status(self, awaited_status): """Wait for status to change from present status to awaited_status""" - status_map = { - 'creating': 'available', - 'rebooting': 'available', - 'modifying': 'available', - 'deleting': 'gone' - } + status_map = {"creating": "available", "rebooting": "available", "modifying": "available", "deleting": "gone"} if self.status == awaited_status: # No need to wait, we're already done return if status_map[self.status] != awaited_status: - msg = "Invalid awaited status. '%s' cannot transition to '%s'" - self.module.fail_json(msg=msg % (self.status, awaited_status)) + self.module.fail_json( + msg=f"Invalid awaited status. '{self.status}' cannot transition to '{awaited_status}'" + ) if awaited_status not in set(status_map.values()): - msg = "'%s' is not a valid awaited status." 
- self.module.fail_json(msg=msg % awaited_status) + self.module.fail_json(msg=f"'{awaited_status}' is not a valid awaited status.") while True: sleep(1) @@ -381,27 +381,24 @@ class ElastiCacheManager(object): def _requires_modification(self): """Check if cluster requires (nondestructive) modification""" # Check modifiable data attributes - modifiable_data = { - 'NumCacheNodes': self.num_nodes, - 'EngineVersion': self.cache_engine_version - } + modifiable_data = {"NumCacheNodes": self.num_nodes, "EngineVersion": self.cache_engine_version} for key, value in modifiable_data.items(): if value is not None and value and self.data[key] != value: return True # Check cache security groups cache_security_groups = [] - for sg in self.data['CacheSecurityGroups']: - cache_security_groups.append(sg['CacheSecurityGroupName']) + for sg in self.data["CacheSecurityGroups"]: + cache_security_groups.append(sg["CacheSecurityGroupName"]) if set(cache_security_groups) != set(self.cache_security_groups): return True # check vpc security groups if self.security_group_ids: vpc_security_groups = [] - security_groups = self.data.get('SecurityGroups', []) + security_groups = self.data.get("SecurityGroups", []) for sg in security_groups: - vpc_security_groups.append(sg['SecurityGroupId']) + vpc_security_groups.append(sg["SecurityGroupId"]) if set(vpc_security_groups) != set(self.security_group_ids): return True @@ -412,13 +409,13 @@ class ElastiCacheManager(object): Check whether a destroy and create is required to synchronize cluster. """ unmodifiable_data = { - 'node_type': self.data['CacheNodeType'], - 'engine': self.data['Engine'], - 'cache_port': self._get_port() + "node_type": self.data["CacheNodeType"], + "engine": self.data["Engine"], + "cache_port": self._get_port(), } # Only check for modifications if zone is specified if self.zone is not None: - unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone'] + unmodifiable_data["zone"] = self.data["PreferredAvailabilityZone"] for key, value in unmodifiable_data.items(): if getattr(self, key) is not None and getattr(self, key) != value: return True @@ -427,18 +424,18 @@ class ElastiCacheManager(object): def _get_elasticache_connection(self): """Get an elasticache connection""" try: - return self.module.client('elasticache') + return self.module.client("elasticache") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Failed to connect to AWS') + self.module.fail_json_aws(e, msg="Failed to connect to AWS") def _get_port(self): """Get the port. 
Where this information is retrieved from is engine dependent.""" - if self.data['Engine'] == 'memcached': - return self.data['ConfigurationEndpoint']['Port'] - elif self.data['Engine'] == 'redis': + if self.data["Engine"] == "memcached": + return self.data["ConfigurationEndpoint"]["Port"] + elif self.data["Engine"] == "redis": # Redis only supports a single node (presently) so just use # the first and only - return self.data['CacheNodes'][0]['Endpoint']['Port'] + return self.data["CacheNodes"][0]["Endpoint"]["Port"] def _refresh_data(self, cache_cluster_data=None): """Refresh data about this cache cluster""" @@ -446,104 +443,110 @@ class ElastiCacheManager(object): if cache_cluster_data is None: try: response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True) - except is_boto3_error_code('CacheClusterNotFound'): + except is_boto3_error_code("CacheClusterNotFound"): self.data = None - self.status = 'gone' + self.status = "gone" return except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Failed to describe cache clusters") - cache_cluster_data = response['CacheClusters'][0] + cache_cluster_data = response["CacheClusters"][0] self.data = cache_cluster_data - self.status = self.data['CacheClusterStatus'] + self.status = self.data["CacheClusterStatus"] # The documentation for elasticache lies -- status on rebooting is set # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it # here to make status checks etc. more sane. - if self.status == 'rebooting cache cluster nodes': - self.status = 'rebooting' + if self.status == "rebooting cache cluster nodes": + self.status = "rebooting" def _get_nodes_to_remove(self): """If there are nodes to remove, it figures out which need to be removed""" - num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes + num_nodes_to_remove = self.data["NumCacheNodes"] - self.num_nodes if num_nodes_to_remove <= 0: return [] if not self.hard_modify: - msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed." - self.module.fail_json(msg=msg % self.name) + self.module.fail_json( + msg=f"'{self.name}' requires removal of cache nodes. 'hard_modify' must be set to true to proceed." 
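+                # (if allowed to proceed, the last num_nodes_to_remove cache node IDs are the ones removed; see the return statement below)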
+ ) - cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] + cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] return cache_node_ids[-num_nodes_to_remove:] def main(): - """ elasticache ansible module """ + """elasticache ansible module""" argument_spec = dict( - state=dict(required=True, choices=['present', 'absent', 'rebooted']), + state=dict(required=True, choices=["present", "absent", "rebooted"]), name=dict(required=True), - engine=dict(default='memcached'), + engine=dict(default="memcached"), cache_engine_version=dict(default=""), - node_type=dict(default='cache.t2.small'), - num_nodes=dict(default=1, type='int'), + node_type=dict(default="cache.t2.small"), + num_nodes=dict(default=1, type="int"), # alias for compat with the original PR 1950 - cache_parameter_group=dict(default="", aliases=['parameter_group']), - cache_port=dict(type='int'), + cache_parameter_group=dict(default="", aliases=["parameter_group"]), + cache_port=dict(type="int"), cache_subnet_group=dict(default=""), - cache_security_groups=dict(default=[], type='list', elements='str'), - security_group_ids=dict(default=[], type='list', elements='str'), + cache_security_groups=dict(default=[], type="list", elements="str"), + security_group_ids=dict(default=[], type="list", elements="str"), zone=dict(), - wait=dict(default=True, type='bool'), - hard_modify=dict(type='bool'), + wait=dict(default=True, type="bool"), + hard_modify=dict(type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, ) - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) - - name = module.params['name'] - state = module.params['state'] - engine = module.params['engine'] - cache_engine_version = module.params['cache_engine_version'] - node_type = module.params['node_type'] - num_nodes = module.params['num_nodes'] - cache_port = module.params['cache_port'] - cache_subnet_group = module.params['cache_subnet_group'] - cache_security_groups = module.params['cache_security_groups'] - security_group_ids = module.params['security_group_ids'] - zone = module.params['zone'] - wait = module.params['wait'] - hard_modify = module.params['hard_modify'] - cache_parameter_group = module.params['cache_parameter_group'] + name = module.params["name"] + state = module.params["state"] + engine = module.params["engine"] + cache_engine_version = module.params["cache_engine_version"] + node_type = module.params["node_type"] + num_nodes = module.params["num_nodes"] + cache_port = module.params["cache_port"] + cache_subnet_group = module.params["cache_subnet_group"] + cache_security_groups = module.params["cache_security_groups"] + security_group_ids = module.params["security_group_ids"] + zone = module.params["zone"] + wait = module.params["wait"] + hard_modify = module.params["hard_modify"] + cache_parameter_group = module.params["cache_parameter_group"] if cache_subnet_group and cache_security_groups: module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups") - if state == 'present' and not num_nodes: + if state == "present" and not num_nodes: module.fail_json(msg="'num_nodes' is a required parameter. 
Please specify num_nodes > 0") - elasticache_manager = ElastiCacheManager(module, name, engine, - cache_engine_version, node_type, - num_nodes, cache_port, - cache_parameter_group, - cache_subnet_group, - cache_security_groups, - security_group_ids, zone, wait, - hard_modify, region, **aws_connect_kwargs) + elasticache_manager = ElastiCacheManager( + module, + name, + engine, + cache_engine_version, + node_type, + num_nodes, + cache_port, + cache_parameter_group, + cache_subnet_group, + cache_security_groups, + security_group_ids, + zone, + wait, + hard_modify, + ) - if state == 'present': + if state == "present": elasticache_manager.ensure_present() - elif state == 'absent': + elif state == "absent": elasticache_manager.ensure_absent() - elif state == 'rebooted': + elif state == "rebooted": elasticache_manager.ensure_rebooted() - facts_result = dict(changed=elasticache_manager.changed, - elasticache=elasticache_manager.get_info()) + facts_result = dict(changed=elasticache_manager.changed, elasticache=elasticache_manager.get_info()) module.exit_json(**facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_info.py b/ansible_collections/community/aws/plugins/modules/elasticache_info.py index f6c34629e..50a8cb5ff 100644 --- a/ansible_collections/community/aws/plugins/modules/elasticache_info.py +++ b/ansible_collections/community/aws/plugins/modules/elasticache_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: elasticache_info short_description: Retrieve information for AWS ElastiCache clusters version_added: 1.0.0 @@ -20,21 +18,21 @@ options: author: - Will Thames (@willthames) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: obtain all ElastiCache information community.aws.elasticache_info: - name: obtain all information for a single ElastiCache cluster community.aws.elasticache_info: name: test_elasticache -''' +""" -RETURN = ''' +RETURN = r""" elasticache_clusters: description: List of ElastiCache clusters. 
returned: always
@@ -402,93 +400,82 @@ elasticache_clusters:
       sample:
         Application: web
         Environment: test
-'''
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-
+"""

 try:
     import botocore
 except ImportError:
     pass  # caught by AnsibleAWSModule

+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+

 @AWSRetry.exponential_backoff()
 def describe_cache_clusters_with_backoff(client, cluster_id=None):
-    paginator = client.get_paginator('describe_cache_clusters')
+    paginator = client.get_paginator("describe_cache_clusters")
     params = dict(ShowCacheNodeInfo=True)
     if cluster_id:
-        params['CacheClusterId'] = cluster_id
+        params["CacheClusterId"] = cluster_id
     try:
         response = paginator.paginate(**params).build_full_result()
-    except is_boto3_error_code('CacheClusterNotFound'):
+    except is_boto3_error_code("CacheClusterNotFound"):
         return []
-    return response['CacheClusters']
+    return response["CacheClusters"]


 @AWSRetry.exponential_backoff()
 def describe_replication_group_with_backoff(client, replication_group_id):
     try:
         response = client.describe_replication_groups(ReplicationGroupId=replication_group_id)
-    except is_boto3_error_code('ReplicationGroupNotFoundFault'):
+    except is_boto3_error_code("ReplicationGroupNotFoundFault"):
         return None

-    return response['ReplicationGroups'][0]
+    return response["ReplicationGroups"][0]


 @AWSRetry.exponential_backoff()
 def get_elasticache_tags_with_backoff(client, cluster_id):
-    return client.list_tags_for_resource(ResourceName=cluster_id)['TagList']
-
-
-def get_aws_account_id(module):
-    try:
-        client = module.client('sts')
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Can't authorize connection")
-
-    try:
-        return client.get_caller_identity()['Account']
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Couldn't obtain AWS account id")
+    return client.list_tags_for_resource(ResourceName=cluster_id)["TagList"]


 def get_elasticache_clusters(client, module):
     region = module.region
     try:
-        clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name'))
+        clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get("name"))
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json_aws(e, msg="Couldn't obtain cache cluster info")

-    account_id = get_aws_account_id(module)
+    account_id, partition = get_aws_account_info(module)
     results = []
     for cluster in clusters:
-        cluster = camel_dict_to_snake_dict(cluster)
-        arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id'])
+        arn = f"arn:{partition}:elasticache:{region}:{account_id}:cluster:{cluster['cache_cluster_id']}"

         try:
             tags = get_elasticache_tags_with_backoff(client, arn)
         except is_boto3_error_code("CacheClusterNotFound"):
             # e.g.: Cluster was listed but is in deleting state
             continue
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg="Couldn't get tags for cluster %s")
+            module.fail_json_aws(e, msg=f"Couldn't get tags for cluster {cluster['cache_cluster_id']}")

-        cluster['tags'] = boto3_tag_list_to_ansible_dict(tags)
+        cluster["tags"] = boto3_tag_list_to_ansible_dict(tags)

-        if cluster.get('replication_group_id', None):
+        if cluster.get("replication_group_id", None):
             try:
-                replication_group = describe_replication_group_with_backoff(client, cluster['replication_group_id'])
+                replication_group = describe_replication_group_with_backoff(client, cluster["replication_group_id"])
             except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                 module.fail_json_aws(e, msg="Couldn't obtain replication group info")
             if replication_group is not None:
                 replication_group = camel_dict_to_snake_dict(replication_group)
-                cluster['replication_group'] = replication_group
+                cluster["replication_group"] = replication_group

         results.append(cluster)

     return results
@@ -500,10 +487,10 @@ def main():
     )

     module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

-    client = module.client('elasticache')
+    client = module.client("elasticache")

     module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module))


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py b/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py
index 247dd0bab..fa7f87a2f 100644
--- a/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py
+++ b/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+# -*- coding: utf-8 -*-

+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: elasticache_parameter_group
version_added: 1.0.0
@@ -14,11 +12,8 @@ short_description: Manage cache parameter groups in Amazon ElastiCache.
description:
  - Manage cache parameter groups in Amazon ElastiCache.
  - Returns information about the specified cache parameter group.
-author: "Sloane Hertel (@s-hertel)"
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+author:
+  - "Sloane Hertel (@s-hertel)"

options:
  group_family:
@@ -47,13 +42,17 @@ options:
    description:
      - A user-specified dictionary of parameters to reset or modify for the cache parameter group.
    type: dict
-'''
-EXAMPLES = """
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
---- -- hosts: localhost +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. +- name: Create, modify and delete a parameter group + hosts: localhost connection: local tasks: - name: 'Create a test parameter group' @@ -66,7 +65,7 @@ EXAMPLES = """ community.aws.elasticache_parameter_group: name: 'test-param-group' values: - activerehashing: yes + activerehashing: true client-output-buffer-limit-normal-hard-limit: 4 state: 'present' - name: 'Reset all modifiable parameters for the test parameter group' @@ -79,7 +78,7 @@ EXAMPLES = """ state: 'absent' """ -RETURN = """ +RETURN = r""" elasticache: description: cache parameter group information and response metadata returned: always @@ -115,13 +114,15 @@ from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def create(module, conn, name, group_family, description): - """ Create ElastiCache parameter group. """ + """Create ElastiCache parameter group.""" try: - response = conn.create_cache_parameter_group(CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description) + response = conn.create_cache_parameter_group( + CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create cache parameter group.") @@ -129,7 +130,7 @@ def create(module, conn, name, group_family, description): def delete(module, conn, name): - """ Delete ElastiCache parameter group. """ + """Delete ElastiCache parameter group.""" try: conn.delete_cache_parameter_group(CacheParameterGroupName=name) response = {} @@ -140,10 +141,10 @@ def delete(module, conn, name): def make_current_modifiable_param_dict(module, conn, name): - """ Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}""" + """Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}""" current_info = get_info(conn, name) if current_info is False: - module.fail_json(msg="Could not connect to the cache parameter group %s." % name) + module.fail_json(msg=f"Could not connect to the cache parameter group {name}.") parameters = current_info["Parameters"] modifiable_params = {} @@ -157,7 +158,7 @@ def make_current_modifiable_param_dict(module, conn, name): def check_valid_modification(module, values, modifiable_params): - """ Check if the parameters and values in values are valid. """ + """Check if the parameters and values in values are valid.""" changed_with_update = False for parameter in values: @@ -165,7 +166,9 @@ def check_valid_modification(module, values, modifiable_params): # check valid modifiable parameters if parameter not in modifiable_params: - module.fail_json(msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." 
% (parameter, modifiable_params.keys()))
+            module.fail_json(
+                msg=f"{parameter} is not a modifiable parameter. Valid parameters to modify are: {modifiable_params.keys()}."
+            )

         # check allowed datatype for modified parameters
         str_to_type = {"integer": int, "string": string_types}
@@ -180,18 +183,27 @@ def check_valid_modification(module, values, modifiable_params):
                 if isinstance(new_value, bool):
                     values[parameter] = 1 if new_value else 0
                 else:
-                    module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
-                                     (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
+                    module.fail_json(
+                        msg=(
+                            f"{new_value} (type {type(new_value)}) is not an allowed value for the parameter"
+                            f" {parameter}. Expected a type {modifiable_params[parameter][1]}."
+                        )
+                    )
             else:
-                module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
-                                 (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
+                module.fail_json(
+                    msg=(
+                        f"{new_value} (type {type(new_value)}) is not an allowed value for the parameter {parameter}."
+                        f" Expected a type {modifiable_params[parameter][1]}."
+                    )
+                )

         # check allowed values for modifiable parameters
         choices = modifiable_params[parameter][0]
         if choices:
             if not (to_text(new_value) in choices or isinstance(new_value, int)):
-                module.fail_json(msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." %
-                                 (new_value, parameter, choices))
+                module.fail_json(
+                    msg=f"{new_value} is not an allowed value for the parameter {parameter}. Valid parameters are: {choices}."
+                )

         # check if a new value is different from current value
         if to_text(values[parameter]) != modifiable_params[parameter][2]:
@@ -201,7 +213,7 @@ def check_valid_modification(module, values, modifiable_params):


 def check_changed_parameter_values(values, old_parameters, new_parameters):
-    """ Checking if the new values are different than the old values. """
+    """Checking if the new values are different than the old values."""
     changed_with_update = False

     # if the user specified parameters to reset, only check those for change
@@ -221,21 +233,23 @@ def check_changed_parameter_values(values, old_parameters, new_parameters):


 def modify(module, conn, name, values):
-    """ Modify ElastiCache parameter group to reflect the new information if it differs from the current. """
+    """Modify ElastiCache parameter group to reflect the new information if it differs from the current."""
     # compares current group parameters with the parameters we've specified, to see if this will change the group
     format_parameters = []
     for key in values:
         value = to_text(values[key])
-        format_parameters.append({'ParameterName': key, 'ParameterValue': value})
+        format_parameters.append({"ParameterName": key, "ParameterValue": value})
     try:
-        response = conn.modify_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters)
+        response = conn.modify_cache_parameter_group(
+            CacheParameterGroupName=name, ParameterNameValues=format_parameters
+        )
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json_aws(e, msg="Unable to modify cache parameter group.")
     return response


 def reset(module, conn, name, values):
-    """ Reset ElastiCache parameter group if the current information is different from the new information. """
+    """Reset ElastiCache parameter group if the current information is different from the new information."""

     # used to compare with the reset parameters' dict to see if there have been changes
     old_parameters_dict = make_current_modifiable_param_dict(module, conn, name)

@@ -247,12 +261,14 @@ def reset(module, conn, name, values):
         format_parameters = []
         for key in values:
             value = to_text(values[key])
-            format_parameters.append({'ParameterName': key, 'ParameterValue': value})
+            format_parameters.append({"ParameterName": key, "ParameterValue": value})
     else:
         all_parameters = True

     try:
-        response = conn.reset_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters)
+        response = conn.reset_cache_parameter_group(
+            CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters
+        )
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json_aws(e, msg="Unable to reset cache parameter group.")

@@ -264,7 +280,7 @@ def reset(module, conn, name, values):


 def get_info(conn, name):
-    """ Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access. """
+    """Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access."""
     try:
         data = conn.describe_cache_parameters(CacheParameterGroupName=name)
         return data
@@ -274,36 +290,50 @@ def main():
     argument_spec = dict(
-        group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0', 'redis6.x']),
-        name=dict(required=True, type='str'),
-        description=dict(default='', type='str'),
-        state=dict(required=True, choices=['present', 'absent', 'reset']),
-        values=dict(type='dict'),
+        group_family=dict(
+            type="str",
+            choices=[
+                "memcached1.4",
+                "memcached1.5",
+                "redis2.6",
+                "redis2.8",
+                "redis3.2",
+                "redis4.0",
+                "redis5.0",
+                "redis6.x",
+            ],
+        ),
+        name=dict(required=True, type="str"),
+        description=dict(default="", type="str"),
+        state=dict(required=True, choices=["present", "absent", "reset"]),
+        values=dict(type="dict"),
     )
     module = AnsibleAWSModule(argument_spec=argument_spec)

-    parameter_group_family = module.params.get('group_family')
-    parameter_group_name = module.params.get('name')
-    group_description = module.params.get('description')
-    state = module.params.get('state')
-    values = module.params.get('values')
+    parameter_group_family = module.params.get("group_family")
+    parameter_group_name = module.params.get("name")
+    group_description = module.params.get("description")
+    state = module.params.get("state")
+    values = module.params.get("values")

     try:
-        connection = module.client('elasticache')
+        connection = module.client("elasticache")
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Failed to connect to AWS')
+        module.fail_json_aws(e, msg="Failed to connect to AWS")

     exists = get_info(connection, parameter_group_name)

     # check that the needed requirements are available
-    if state == 'present' and not (exists or parameter_group_family):
+    if state == "present" and not (exists or parameter_group_family):
         module.fail_json(msg="Creating a group requires a group family.")
-    elif state == 'reset' and not exists:
-        module.fail_json(msg="No group %s to reset. Please create the group before using the state 'reset'."
% parameter_group_name) + elif state == "reset" and not exists: + module.fail_json( + msg=f"No group {parameter_group_name} to reset. Please create the group before using the state 'reset'." + ) # Taking action changed = False - if state == 'present': + if state == "present": if exists: # confirm that the group exists without any actions if not values: @@ -316,19 +346,21 @@ def main(): response = modify(module, connection, parameter_group_name, values) # create group else: - response, changed = create(module, connection, parameter_group_name, parameter_group_family, group_description) + response, changed = create( + module, connection, parameter_group_name, parameter_group_family, group_description + ) if values: modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name) changed, values = check_valid_modification(module, values, modifiable_params) response = modify(module, connection, parameter_group_name, values) - elif state == 'absent': + elif state == "absent": if exists: # delete group response, changed = delete(module, connection, parameter_group_name) else: response = {} changed = False - elif state == 'reset': + elif state == "reset": response, changed = reset(module, connection, parameter_group_name, values) facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response)) @@ -336,5 +368,5 @@ def main(): module.exit_json(**facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py b/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py index fa18b80c0..0816527fb 100644 --- a/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py +++ b/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: elasticache_snapshot version_added: 1.0.0 @@ -14,11 +12,8 @@ short_description: Manage cache snapshots in Amazon ElastiCache description: - Manage cache snapshots in Amazon ElastiCache. - Returns information about the specified snapshot. -author: "Sloane Hertel (@s-hertel)" -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 +author: + - "Sloane Hertel (@s-hertel)" options: name: description: @@ -47,11 +42,14 @@ options: description: - The s3 bucket to which the snapshot is exported. type: str -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = """ -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. 
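+# Creating a snapshot (state: present) requires both 'replication_id' and 'cluster_id'.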
- name: 'Create a snapshot'
  community.aws.elasticache_snapshot:
@@ -61,7 +59,7 @@ EXAMPLES = """
    replication_id: '{{ replication }}'
"""

-RETURN = """
+RETURN = r"""
response_metadata:
  description: response metadata about the snapshot
  returned: always
@@ -117,18 +115,19 @@ except ImportError:

 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule


 def create(module, connection, replication_id, cluster_id, name):
-    """ Create an ElastiCache backup. """
+    """Create an ElastiCache backup."""
     try:
-        response = connection.create_snapshot(ReplicationGroupId=replication_id,
-                                              CacheClusterId=cluster_id,
-                                              SnapshotName=name)
+        response = connection.create_snapshot(
+            ReplicationGroupId=replication_id, CacheClusterId=cluster_id, SnapshotName=name
+        )
         changed = True
-    except is_boto3_error_code('SnapshotAlreadyExistsFault'):
+    except is_boto3_error_code("SnapshotAlreadyExistsFault"):
         response = {}
         changed = False
     except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
@@ -137,11 +136,9 @@ def create(module, connection, replication_id, cluster_id, name):


 def copy(module, connection, name, target, bucket):
-    """ Copy an ElastiCache backup. """
+    """Copy an ElastiCache backup."""
     try:
-        response = connection.copy_snapshot(SourceSnapshotName=name,
-                                            TargetSnapshotName=target,
-                                            TargetBucket=bucket)
+        response = connection.copy_snapshot(SourceSnapshotName=name, TargetSnapshotName=target, TargetBucket=bucket)
         changed = True
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json_aws(e, msg="Unable to copy the snapshot.")
@@ -149,16 +146,20 @@ def copy(module, connection, name, target, bucket):


 def delete(module, connection, name):
-    """ Delete an ElastiCache backup. """
+    """Delete an ElastiCache backup."""
     try:
         response = connection.delete_snapshot(SnapshotName=name)
         changed = True
-    except is_boto3_error_code('SnapshotNotFoundFault'):
+    except is_boto3_error_code("SnapshotNotFoundFault"):
         response = {}
         changed = False
-    except is_boto3_error_code('InvalidSnapshotState'):  # pylint: disable=duplicate-except
-        module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion."
-                             "You may need to wait a few minutes.")
+    except is_boto3_error_code("InvalidSnapshotState"):  # pylint: disable=duplicate-except
+        module.fail_json(
+            msg=(
+                "Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow"
+                " deletion. You may need to wait a few minutes."
+ ) + ) except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete the snapshot.") return response, changed @@ -166,38 +167,38 @@ def delete(module, connection, name): def main(): argument_spec = dict( - name=dict(required=True, type='str'), - state=dict(required=True, type='str', choices=['present', 'absent', 'copy']), - replication_id=dict(type='str'), - cluster_id=dict(type='str'), - target=dict(type='str'), - bucket=dict(type='str'), + name=dict(required=True, type="str"), + state=dict(required=True, type="str", choices=["present", "absent", "copy"]), + replication_id=dict(type="str"), + cluster_id=dict(type="str"), + target=dict(type="str"), + bucket=dict(type="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - name = module.params.get('name') - state = module.params.get('state') - replication_id = module.params.get('replication_id') - cluster_id = module.params.get('cluster_id') - target = module.params.get('target') - bucket = module.params.get('bucket') + name = module.params.get("name") + state = module.params.get("state") + replication_id = module.params.get("replication_id") + cluster_id = module.params.get("cluster_id") + target = module.params.get("target") + bucket = module.params.get("bucket") try: - connection = module.client('elasticache') + connection = module.client("elasticache") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") changed = False response = {} - if state == 'present': + if state == "present": if not all((replication_id, cluster_id)): module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'") response, changed = create(module, connection, replication_id, cluster_id, name) - elif state == 'absent': + elif state == "absent": response, changed = delete(module, connection, name) - elif state == 'copy': + elif state == "copy": if not all((target, bucket)): module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.") response, changed = copy(module, connection, name, target, bucket) @@ -207,5 +208,5 @@ def main(): module.exit_json(**facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py b/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py index 0f5f5e75e..f7740e696 100644 --- a/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py +++ b/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py @@ -1,18 +1,16 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elasticache_subnet_group version_added: 1.0.0 short_description: manage ElastiCache subnet groups description: - - Creates, modifies, and deletes ElastiCache subnet groups. + - Creates, modifies, and deletes ElastiCache subnet groups. 
options: state: description: @@ -40,12 +38,12 @@ options: author: - "Tim Mahoney (@timmahoney)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add or change a subnet group community.aws.elasticache_subnet_group: state: present @@ -59,9 +57,9 @@ EXAMPLES = r''' community.aws.elasticache_subnet_group: state: absent name: norwegian-blue -''' +""" -RETURN = r''' +RETURN = r""" cache_subnet_group: description: Description of the Elasticache Subnet Group. returned: always @@ -95,7 +93,7 @@ cache_subnet_group: sample: - subnet-aaaaaaaa - subnet-bbbbbbbb -''' +""" try: import botocore @@ -104,9 +102,10 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_subnet_group(name): @@ -114,10 +113,13 @@ def get_subnet_group(name): groups = client.describe_cache_subnet_groups( aws_retry=True, CacheSubnetGroupName=name, - )['CacheSubnetGroups'] - except is_boto3_error_code('CacheSubnetGroupNotFoundFault'): + )["CacheSubnetGroups"] + except is_boto3_error_code("CacheSubnetGroupNotFoundFault"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe subnet group") if not groups: @@ -131,19 +133,18 @@ def get_subnet_group(name): subnet_group = camel_dict_to_snake_dict(groups[0]) - subnet_group['name'] = subnet_group['cache_subnet_group_name'] - subnet_group['description'] = subnet_group['cache_subnet_group_description'] + subnet_group["name"] = subnet_group["cache_subnet_group_name"] + subnet_group["description"] = subnet_group["cache_subnet_group_description"] - subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets']) - subnet_group['subnet_ids'] = subnet_ids + subnet_ids = list(s["subnet_identifier"] for s in subnet_group["subnets"]) + subnet_group["subnet_ids"] = subnet_ids return subnet_group def create_subnet_group(name, description, subnets): - if not subnets: - module.fail_json(msg='At least one subnet must be provided when creating a subnet group') + module.fail_json(msg="At least one subnet must be provided when creating a subnet group") if module.check_mode: return True @@ -164,13 +165,13 @@ def create_subnet_group(name, description, subnets): def update_subnet_group(subnet_group, name, description, subnets): update_params = dict() - if description and subnet_group['description'] != description: - update_params['CacheSubnetGroupDescription'] = description + if description and subnet_group["description"] != description: + update_params["CacheSubnetGroupDescription"] = description if subnets: - old_subnets = set(subnet_group['subnet_ids']) + old_subnets = 
set(subnet_group["subnet_ids"]) new_subnets = set(subnets) if old_subnets != new_subnets: - update_params['SubnetIds'] = list(subnets) + update_params["SubnetIds"] = list(subnets) if not update_params: return False @@ -191,7 +192,6 @@ def update_subnet_group(subnet_group, name, description, subnets): def delete_subnet_group(name): - if module.check_mode: return True @@ -201,20 +201,23 @@ def delete_subnet_group(name): CacheSubnetGroupName=name, ) return True - except is_boto3_error_code('CacheSubnetGroupNotFoundFault'): + except is_boto3_error_code("CacheSubnetGroupNotFoundFault"): # AWS is "eventually consistent", cope with the race conditions where # deletion hadn't completed when we ran describe return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete subnet group") def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), name=dict(required=True), description=dict(required=False), - subnets=dict(required=False, type='list', elements='str'), + subnets=dict(required=False, type="list", elements="str"), ) global module @@ -225,17 +228,17 @@ def main(): supports_check_mode=True, ) - state = module.params.get('state') - name = module.params.get('name').lower() - description = module.params.get('description') - subnets = module.params.get('subnets') + state = module.params.get("state") + name = module.params.get("name").lower() + description = module.params.get("description") + subnets = module.params.get("subnets") - client = module.client('elasticache', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("elasticache", retry_decorator=AWSRetry.jittered_backoff()) subnet_group = get_subnet_group(name) changed = False - if state == 'present': + if state == "present": if not subnet_group: result = create_subnet_group(name, description, subnets) changed |= result @@ -252,5 +255,5 @@ def main(): module.exit_json(changed=changed, cache_subnet_group=subnet_group) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py b/ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py index b5b32c178..1aaa4c4d8 100644 --- a/ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py +++ b/ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: elasticbeanstalk_app version_added: 1.0.0 @@ -43,12 +41,12 @@ author: - Harpreet Singh (@hsingh) - Stephen Granger (@viper233) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create or update an application - community.aws.elasticbeanstalk_app: app_name: Sample_App @@ -59,10 +57,9 @@ EXAMPLES = ''' - community.aws.elasticbeanstalk_app: app_name: Sample_App state: absent +""" -''' - 
-RETURN = ''' +RETURN = r""" app: description: Beanstalk application. returned: always @@ -83,15 +80,16 @@ output: returned: in check mode type: str sample: App is up-to-date -''' +""" try: import botocore except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def describe_app(ebs, app_name, module): @@ -113,24 +111,24 @@ def list_apps(ebs, app_name, module): def check_app(ebs, app, module): - app_name = module.params['app_name'] - description = module.params['description'] - state = module.params['state'] - terminate_by_force = module.params['terminate_by_force'] + app_name = module.params["app_name"] + description = module.params["description"] + state = module.params["state"] + terminate_by_force = module.params["terminate_by_force"] result = {} - if state == 'present' and app is None: + if state == "present" and app is None: result = dict(changed=True, output="App would be created") - elif state == 'present' and app.get("Description", None) != description: + elif state == "present" and app.get("Description", None) != description: result = dict(changed=True, output="App would be updated", app=app) - elif state == 'present' and app.get("Description", None) == description: + elif state == "present" and app.get("Description", None) == description: result = dict(changed=False, output="App is up-to-date", app=app) - elif state == 'absent' and app is None: + elif state == "absent" and app is None: result = dict(changed=False, output="App does not exist", app={}) - elif state == 'absent' and app is not None: + elif state == "absent" and app is not None: result = dict(changed=True, output="App will be deleted", app=app) - elif state == 'absent' and app is not None and terminate_by_force is True: + elif state == "absent" and app is not None and terminate_by_force is True: result = dict(changed=True, output="Running environments terminated before the App will be deleted", app=app) module.exit_json(**result) @@ -146,37 +144,36 @@ def filter_empty(**kwargs): def main(): argument_spec = dict( - app_name=dict(aliases=['name'], type='str', required=False), + app_name=dict(aliases=["name"], type="str", required=False), description=dict(), - state=dict(choices=['present', 'absent'], default='present'), - terminate_by_force=dict(type='bool', default=False, required=False) + state=dict(choices=["present", "absent"], default="present"), + terminate_by_force=dict(type="bool", default=False, required=False), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - app_name = module.params['app_name'] - description = module.params['description'] - state = module.params['state'] - terminate_by_force = module.params['terminate_by_force'] + app_name = module.params["app_name"] + description = module.params["description"] + state = module.params["state"] + terminate_by_force = module.params["terminate_by_force"] if app_name is None: module.fail_json(msg='Module parameter "app_name" is required') result = {} - ebs = module.client('elasticbeanstalk') + ebs = module.client("elasticbeanstalk") app = describe_app(ebs, app_name, module) if module.check_mode: check_app(ebs, app, module) - 
module.fail_json(msg='ASSERTION FAILURE: check_app() should not return control.') + module.fail_json(msg="ASSERTION FAILURE: check_app() should not return control.") - if state == 'present': + if state == "present": if app is None: try: - create_app = ebs.create_application(**filter_empty(ApplicationName=app_name, - Description=description)) + create_app = ebs.create_application(**filter_empty(ApplicationName=app_name, Description=description)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Could not create application") @@ -201,7 +198,7 @@ def main(): else: if app is None: - result = dict(changed=False, output='Application not found', app={}) + result = dict(changed=False, output="Application not found", app={}) else: try: if terminate_by_force: @@ -210,9 +207,12 @@ def main(): else: ebs.delete_application(ApplicationName=app_name) changed = True - except is_boto3_error_message('It is currently pending deletion'): + except is_boto3_error_message("It is currently pending deletion"): changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Cannot terminate app") result = dict(changed=changed, app=app) @@ -220,5 +220,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py b/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py index 4cbeb9589..5329e5b81 100644 --- a/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py +++ b/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py @@ -1,29 +1,16 @@ #!/usr/bin/python -# -# This is a free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This Ansible library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this library. If not, see <http://www.gnu.org/licenses/>. 
- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" --- module: elb_classic_lb_info version_added: 1.0.0 short_description: Gather information about EC2 Elastic Load Balancers in AWS description: - - Gather information about EC2 Elastic Load Balancers in AWS + - Gather information about EC2 Elastic Load Balancers in AWS author: - "Michael Schultz (@mjschultz)" - "Fernando Jose Pando (@nand0p)" @@ -35,12 +22,12 @@ options: elements: str default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Output format tries to match amazon.aws.ec2_elb_lb module input parameters @@ -63,17 +50,16 @@ EXAMPLES = r''' # Gather information about a set of ELBs - community.aws.elb_classic_lb_info: names: - - frontend-prod-elb - - backend-prod-elb + - frontend-prod-elb + - backend-prod-elb register: elb_info - ansible.builtin.debug: msg: "{{ item.dns_name }}" loop: "{{ elb_info.elbs }}" +""" -''' - -RETURN = r''' +RETURN = r""" elbs: description: a list of load balancers returned: always @@ -137,20 +123,21 @@ elbs: - subnet-XXXXXXXX tags: {} vpc_id: vpc-c248fda4 -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - AWSRetry, - camel_dict_to_snake_dict, - boto3_tag_list_to_ansible_dict -) +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + MAX_AWS_RETRIES = 5 MAX_AWS_DELAY = 5 @@ -172,63 +159,79 @@ def list_elbs(connection, load_balancer_names): def describe_elb(connection, lb): description = camel_dict_to_snake_dict(lb) - name = lb['LoadBalancerName'] - instances = lb.get('Instances', []) - description['tags'] = get_tags(connection, name) - description['instances_inservice'], description['instances_inservice_count'] = lb_instance_health(connection, name, instances, 'InService') - description['instances_outofservice'], description['instances_outofservice_count'] = lb_instance_health(connection, name, instances, 'OutOfService') - description['instances_unknownservice'], description['instances_unknownservice_count'] = lb_instance_health(connection, name, instances, 'Unknown') - description['attributes'] = get_lb_attributes(connection, name) + name = lb["LoadBalancerName"] + instances = lb.get("Instances", []) + description["tags"] = get_tags(connection, name) + description["instances_inservice"], description["instances_inservice_count"] = lb_instance_health( + connection, name, instances, "InService" + ) + description["instances_outofservice"], 
description["instances_outofservice_count"] = lb_instance_health( + connection, name, instances, "OutOfService" + ) + description["instances_unknownservice"], description["instances_unknownservice_count"] = lb_instance_health( + connection, name, instances, "Unknown" + ) + description["attributes"] = get_lb_attributes(connection, name) return description @AWSRetry.jittered_backoff() def get_all_lb(connection): - paginator = connection.get_paginator('describe_load_balancers') - return paginator.paginate().build_full_result()['LoadBalancerDescriptions'] + paginator = connection.get_paginator("describe_load_balancers") + return paginator.paginate().build_full_result()["LoadBalancerDescriptions"] def get_lb(connection, load_balancer_name): try: - return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])['LoadBalancerDescriptions'][0] - except is_boto3_error_code('LoadBalancerNotFound'): + return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])[ + "LoadBalancerDescriptions" + ][0] + except is_boto3_error_code("LoadBalancerNotFound"): return [] def get_lb_attributes(connection, load_balancer_name): - attributes = connection.describe_load_balancer_attributes(aws_retry=True, LoadBalancerName=load_balancer_name).get('LoadBalancerAttributes', {}) + attributes = connection.describe_load_balancer_attributes(aws_retry=True, LoadBalancerName=load_balancer_name).get( + "LoadBalancerAttributes", {} + ) return camel_dict_to_snake_dict(attributes) def get_tags(connection, load_balancer_name): - tags = connection.describe_tags(aws_retry=True, LoadBalancerNames=[load_balancer_name])['TagDescriptions'] + tags = connection.describe_tags(aws_retry=True, LoadBalancerNames=[load_balancer_name])["TagDescriptions"] if not tags: return {} - return boto3_tag_list_to_ansible_dict(tags[0]['Tags']) + return boto3_tag_list_to_ansible_dict(tags[0]["Tags"]) def lb_instance_health(connection, load_balancer_name, instances, state): - instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get('InstanceStates', []) - instate = [instance['InstanceId'] for instance in instance_states if instance['State'] == state] + instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get( + "InstanceStates", [] + ) + instate = [instance["InstanceId"] for instance in instance_states if instance["State"] == state] return instate, len(instate) def main(): argument_spec = dict( - names=dict(default=[], type='list', elements='str') + names=dict(default=[], type="list", elements="str"), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) - connection = module.client('elb', retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY)) + connection = module.client( + "elb", retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY) + ) try: - elbs = list_elbs(connection, module.params.get('names')) + elbs = list_elbs(connection, module.params.get("names")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get load balancer information.") module.exit_json(elbs=elbs) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/elb_instance.py 
b/ansible_collections/community/aws/plugins/modules/elb_instance.py index ecea32a63..6489a86bc 100644 --- a/ansible_collections/community/aws/plugins/modules/elb_instance.py +++ b/ansible_collections/community/aws/plugins/modules/elb_instance.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elb_instance version_added: 1.0.0 @@ -15,7 +13,8 @@ description: - This module de-registers or registers an AWS EC2 instance from the ELBs that it belongs to. - Will be marked changed when called only if there are ELBs found to operate on. -author: "John Jarvis (@jarv)" +author: + - "John Jarvis (@jarv)" options: state: description: @@ -55,13 +54,13 @@ options: default: 0 type: int notes: -- The ec2_elbs fact previously set by this module was deprecated in release 2.1.0 and since release - 4.0.0 is no longer set. + - The ec2_elbs fact previously set by this module was deprecated in release 2.1.0 and since release + 4.0.0 is no longer set. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" EXAMPLES = r""" # basic pre_task and post_task example @@ -83,22 +82,23 @@ post_tasks: delegate_to: localhost """ -RETURN = ''' +RETURN = r""" updated_elbs: description: A list of ELB names that the instance has been added to or removed from. returned: always type: list elements: str -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class ElbManager: @@ -107,9 +107,9 @@ class ElbManager: def __init__(self, module, instance_id=None, ec2_elbs=None): retry_decorator = AWSRetry.jittered_backoff() self.module = module - self.client_asg = module.client('autoscaling', retry_decorator=retry_decorator) - self.client_ec2 = module.client('ec2', retry_decorator=retry_decorator) - self.client_elb = module.client('elb', retry_decorator=retry_decorator) + self.client_asg = module.client("autoscaling", retry_decorator=retry_decorator) + self.client_ec2 = module.client("ec2", retry_decorator=retry_decorator) + self.client_elb = module.client("elb", retry_decorator=retry_decorator) self.instance_id = instance_id self.lbs = self._get_instance_lbs(ec2_elbs) self.changed = False @@ -120,11 +120,11 @@ class ElbManager: to report it out-of-service""" for lb in self.lbs: - instance_ids = [i['InstanceId'] for i in lb['Instances']] + instance_ids = [i["InstanceId"] for i in lb["Instances"]] if self.instance_id not in instance_ids: continue - self.updated_elbs.add(lb['LoadBalancerName']) + self.updated_elbs.add(lb["LoadBalancerName"]) if self.module.check_mode: self.changed = True @@ -133,12 +133,13 @@ class ElbManager: try: 
self.client_elb.deregister_instances_from_load_balancer( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], Instances=[{"InstanceId": self.instance_id}], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 'Failed to deregister instance from load balancer', - load_balancer=lb, instance=self.instance_id) + self.module.fail_json_aws( + e, "Failed to deregister instance from load balancer", load_balancer=lb, instance=self.instance_id + ) # The ELB is changing state in some way. Either an instance that's # InService is moving to OutOfService, or an instance that's @@ -147,17 +148,17 @@ class ElbManager: if wait: for lb in self.lbs: - self._await_elb_instance_state(lb, 'Deregistered', timeout) + self._await_elb_instance_state(lb, "Deregistered", timeout) def register(self, wait, enable_availability_zone, timeout): """Register the instance for all ELBs and wait for the ELB to report the instance in-service""" for lb in self.lbs: - instance_ids = [i['InstanceId'] for i in lb['Instances']] + instance_ids = [i["InstanceId"] for i in lb["Instances"]] if self.instance_id in instance_ids: continue - self.updated_elbs.add(lb['LoadBalancerName']) + self.updated_elbs.add(lb["LoadBalancerName"]) if enable_availability_zone: self.changed |= self._enable_availailability_zone(lb) @@ -169,31 +170,32 @@ class ElbManager: try: self.client_elb.register_instances_with_load_balancer( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], Instances=[{"InstanceId": self.instance_id}], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 'Failed to register instance with load balancer', - load_balancer=lb, instance=self.instance_id) + self.module.fail_json_aws( + e, "Failed to register instance with load balancer", load_balancer=lb, instance=self.instance_id + ) self.changed = True if wait: for lb in self.lbs: - self._await_elb_instance_state(lb, 'InService', timeout) + self._await_elb_instance_state(lb, "InService", timeout) @AWSRetry.jittered_backoff() def _describe_elbs(self, **params): - paginator = self.client_elb.get_paginator('describe_load_balancers') + paginator = self.client_elb.get_paginator("describe_load_balancers") results = paginator.paginate(**params).build_full_result() - return results['LoadBalancerDescriptions'] + return results["LoadBalancerDescriptions"] def exists(self, lbtest): - """ Verify that the named ELB actually exists """ + """Verify that the named ELB actually exists""" found = False for lb in self.lbs: - if lb['LoadBalancerName'] == lbtest: + if lb["LoadBalancerName"] == lbtest: found = True break return found @@ -203,9 +205,9 @@ class ElbManager: Returns True if the zone was enabled or False if no change was made. 
lb: load balancer""" instance = self._get_instance() - desired_zone = instance['Placement']['AvailabilityZone'] + desired_zone = instance["Placement"]["AvailabilityZone"] - if desired_zone in lb['AvailabilityZones']: + if desired_zone in lb["AvailabilityZones"]: return False if self.module.check_mode: @@ -214,12 +216,11 @@ class ElbManager: try: self.client_elb.enable_availability_zones_for_load_balancer( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], AvailabilityZones=[desired_zone], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 'Failed to enable AZ on load balancers', - load_balancer=lb, zone=desired_zone) + self.module.fail_json_aws(e, "Failed to enable AZ on load balancers", load_balancer=lb, zone=desired_zone) return True @@ -233,27 +234,29 @@ class ElbManager: if awaited_state == initial_state: return - if awaited_state == 'InService': - waiter = self.client_elb.get_waiter('instance_in_service') - elif awaited_state == 'Deregistered': - waiter = self.client_elb.get_waiter('instance_deregistered') - elif awaited_state == 'OutOfService': - waiter = self.client_elb.get_waiter('instance_deregistered') + if awaited_state == "InService": + waiter = self.client_elb.get_waiter("instance_in_service") + elif awaited_state == "Deregistered": + waiter = self.client_elb.get_waiter("instance_deregistered") + elif awaited_state == "OutOfService": + waiter = self.client_elb.get_waiter("instance_deregistered") else: - self.module.fail_json(msg='Could not wait for unknown state', awaited_state=awaited_state) + self.module.fail_json(msg="Could not wait for unknown state", awaited_state=awaited_state) try: waiter.wait( - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], Instances=[{"InstanceId": self.instance_id}], - WaiterConfig={'Delay': 1, 'MaxAttempts': timeout}, + WaiterConfig={"Delay": 1, "MaxAttempts": timeout}, ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, msg='Timeout waiting for instance to reach desired state', - awaited_state=awaited_state) + self.module.fail_json_aws( + e, msg="Timeout waiting for instance to reach desired state", awaited_state=awaited_state + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Error while waiting for instance to reach desired state', - awaited_state=awaited_state) + self.module.fail_json_aws( + e, msg="Error while waiting for instance to reach desired state", awaited_state=awaited_state + ) return @@ -265,18 +268,21 @@ class ElbManager: try: status = self.client_elb.describe_instance_health( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], - Instances=[{'InstanceId': self.instance_id}], - )['InstanceStates'] - except is_boto3_error_code('InvalidInstance'): + LoadBalancerName=lb["LoadBalancerName"], + Instances=[{"InstanceId": self.instance_id}], + )["InstanceStates"] + except is_boto3_error_code("InvalidInstance"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg='Failed to get instance health') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Failed to get instance health") if not status: return None - return status[0]['State'] + return status[0]["State"] def 
_get_instance_lbs(self, ec2_elbs=None):
         """Returns a list of ELBs attached to self.instance_id

@@ -289,12 +295,12 @@ class ElbManager:
             ec2_elbs = self._get_auto_scaling_group_lbs()

         if ec2_elbs:
-            list_params['LoadBalancerNames'] = ec2_elbs
+            list_params["LoadBalancerNames"] = ec2_elbs

         try:
             elbs = self._describe_elbs(**list_params)
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            self.module.fail_json_aws(e, 'Failed to describe load balancers')
+            self.module.fail_json_aws(e, "Failed to describe load balancers")

         if ec2_elbs:
             return elbs

@@ -303,7 +309,7 @@ class ElbManager:
         # of.
         lbs = []
         for lb in elbs:
-            instance_ids = [i['InstanceId'] for i in lb['Instances']]
+            instance_ids = [i["InstanceId"] for i in lb["Instances"]]
             if self.instance_id in instance_ids:
                 lbs.append(lb)

@@ -311,14 +317,14 @@ class ElbManager:

     def _get_auto_scaling_group_lbs(self):
         """Returns a list of ELBs associated with self.instance_id
-           indirectly through its auto scaling group membership"""
+        indirectly through its auto scaling group membership"""

         try:
             asg_instances = self.client_asg.describe_auto_scaling_instances(
-                aws_retry=True,
-                InstanceIds=[self.instance_id])['AutoScalingInstances']
+                aws_retry=True, InstanceIds=[self.instance_id]
+            )["AutoScalingInstances"]
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            self.module.fail_json_aws(e, msg='Failed to describe ASG Instance')
+            self.module.fail_json_aws(e, msg="Failed to describe ASG Instance")

         if len(asg_instances) > 1:
             self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")
@@ -327,42 +333,40 @@ class ElbManager:
             # Instance isn't a member of an ASG
             return []

-        asg_name = asg_instances[0]['AutoScalingGroupName']
+        asg_name = asg_instances[0]["AutoScalingGroupName"]

         try:
             asg_instances = self.client_asg.describe_auto_scaling_groups(
-                aws_retry=True,
-                AutoScalingGroupNames=[asg_name])['AutoScalingGroups']
+                aws_retry=True, AutoScalingGroupNames=[asg_name]
+            )["AutoScalingGroups"]
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            self.module.fail_json_aws(e, msg='Failed to describe ASG Instance')
+            self.module.fail_json_aws(e, msg="Failed to describe auto scaling group")

         if len(asg_instances) != 1:
             self.module.fail_json(msg="Illegal state, expected one auto scaling group.")

-        return asg_instances[0]['LoadBalancerNames']
+        return asg_instances[0]["LoadBalancerNames"]

     def _get_instance(self):
         """Returns the description of an instance"""
         try:
-            result = self.client_ec2.describe_instances(
-                aws_retry=True,
-                InstanceIds=[self.instance_id])
+            result = self.client_ec2.describe_instances(aws_retry=True, InstanceIds=[self.instance_id])
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            self.module.fail_json_aws(e, msg='Failed to describe ASG Instance')
-        return result['Reservations'][0]['Instances'][0]
+            self.module.fail_json_aws(e, msg="Failed to describe instance")
+        return result["Reservations"][0]["Instances"][0]


 def main():
     argument_spec = dict(
-        state={'required': True, 'choices': ['present', 'absent']},
-        instance_id={'required': True},
-        ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
-        enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
-        wait={'required': False, 'default': True, 'type': 'bool'},
-        wait_timeout={'required': False, 'default': 0, 'type': 'int'},
+        state={"required": True, "choices": ["present", "absent"]},
instance_id={"required": True}, + ec2_elbs={"default": None, "required": False, "type": "list", "elements": "str"}, + enable_availability_zone={"default": True, "required": False, "type": "bool"}, + wait={"required": False, "default": True, "type": "bool"}, + wait_timeout={"required": False, "default": 0, "type": "int"}, ) required_if = [ - ('state', 'present', ['ec2_elbs']), + ("state", "present", ["ec2_elbs"]), ] module = AnsibleAWSModule( @@ -371,22 +375,22 @@ def main(): supports_check_mode=True, ) - ec2_elbs = module.params['ec2_elbs'] - wait = module.params['wait'] - enable_availability_zone = module.params['enable_availability_zone'] - timeout = module.params['wait_timeout'] - instance_id = module.params['instance_id'] + ec2_elbs = module.params["ec2_elbs"] + wait = module.params["wait"] + enable_availability_zone = module.params["enable_availability_zone"] + timeout = module.params["wait_timeout"] + instance_id = module.params["instance_id"] elb_man = ElbManager(module, instance_id, ec2_elbs) if ec2_elbs is not None: for elb in ec2_elbs: if not elb_man.exists(elb): - module.fail_json(msg="ELB {0} does not exist".format(elb)) + module.fail_json(msg=f"ELB {elb} does not exist") - if module.params['state'] == 'present': + if module.params["state"] == "present": elb_man.register(wait, enable_availability_zone, timeout) - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": elb_man.deregister(wait, timeout) module.exit_json( @@ -395,5 +399,5 @@ def main(): ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/elb_network_lb.py b/ansible_collections/community/aws/plugins/modules/elb_network_lb.py index 6dcdfd209..86d8f0872 100644 --- a/ansible_collections/community/aws/plugins/modules/elb_network_lb.py +++ b/ansible_collections/community/aws/plugins/modules/elb_network_lb.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Rob White (@wimnat) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elb_network_lb version_added: 1.0.0 @@ -126,17 +123,17 @@ options: - Sets the type of IP addresses used by the subnets of the specified Application Load Balancer. choices: [ 'ipv4', 'dualstack' ] type: str -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - - amazon.aws.tags notes: - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created. -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Create an ELB and attach a listener @@ -186,10 +183,9 @@ EXAMPLES = r''' community.aws.elb_network_lb: name: myelb state: absent +""" -''' - -RETURN = r''' +RETURN = r""" load_balancer: description: A representation of the Network Load Balancer returned: when state is present @@ -328,11 +324,17 @@ load_balancer: returned: when state is present type: str sample: vpc-0011223344 -''' +""" -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListener +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListeners +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def create_or_update_elb(elb_obj): @@ -346,10 +348,12 @@ def create_or_update_elb(elb_obj): # Tags - only need to play with tags if tags parameter has been set to something if elb_obj.tags is not None: - # Delete necessary tags - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']), - boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags) + tags_need_modify, tags_to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(elb_obj.elb["tags"]), + boto3_tag_list_to_ansible_dict(elb_obj.tags), + elb_obj.purge_tags, + ) if tags_to_delete: elb_obj.delete_tags(tags_to_delete) @@ -366,25 +370,29 @@ def create_or_update_elb(elb_obj): elb_obj.modify_elb_attributes() # Listeners - listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn']) + listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb["LoadBalancerArn"]) listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() # Delete listeners for listener_to_delete in listeners_to_delete: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener( + elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb["LoadBalancerArn"] + ) listener_obj.delete() listeners_obj.changed = True # Add listeners for listener_to_add in listeners_to_add: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb["LoadBalancerArn"]) listener_obj.add() listeners_obj.changed = True # Modify listeners for listener_to_modify in listeners_to_modify: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener( + elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb["LoadBalancerArn"] + ) listener_obj.modify() listeners_obj.changed = True @@ -393,8 +401,8 @@ def create_or_update_elb(elb_obj): 
elb_obj.changed = True # Update ELB ip address type only if option has been provided - if elb_obj.module.params.get('ip_address_type') is not None: - elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) + if elb_obj.module.params.get("ip_address_type") is not None: + elb_obj.modify_ip_address_type(elb_obj.module.params.get("ip_address_type")) # Update the objects to pickup changes # Get the ELB again @@ -407,24 +415,20 @@ def create_or_update_elb(elb_obj): # Convert to snake_case and merge in everything we want to return to the user snaked_elb = camel_dict_to_snake_dict(elb_obj.elb) snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes)) - snaked_elb['listeners'] = [] + snaked_elb["listeners"] = [] for listener in listeners_obj.current_listeners: - snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener)) + snaked_elb["listeners"].append(camel_dict_to_snake_dict(listener)) # Change tags to ansible friendly dict - snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags']) + snaked_elb["tags"] = boto3_tag_list_to_ansible_dict(snaked_elb["tags"]) # ip address type - snaked_elb['ip_address_type'] = elb_obj.get_elb_ip_address_type() + snaked_elb["ip_address_type"] = elb_obj.get_elb_ip_address_type() - elb_obj.module.exit_json( - changed=elb_obj.changed, - load_balancer=snaked_elb, - **snaked_elb) + elb_obj.module.exit_json(changed=elb_obj.changed, load_balancer=snaked_elb, **snaked_elb) def delete_elb(elb_obj): - if elb_obj.elb: elb_obj.delete() @@ -432,42 +436,42 @@ def delete_elb(elb_obj): def main(): - - argument_spec = ( - dict( - cross_zone_load_balancing=dict(type='bool'), - deletion_protection=dict(type='bool'), - listeners=dict(type='list', - elements='dict', - options=dict( - Protocol=dict(type='str', required=True), - Port=dict(type='int', required=True), - SslPolicy=dict(type='str'), - Certificates=dict(type='list', elements='dict'), - DefaultActions=dict(type='list', required=True, elements='dict') - ) - ), - name=dict(required=True, type='str'), - purge_listeners=dict(default=True, type='bool'), - purge_tags=dict(default=True, type='bool'), - subnets=dict(type='list', elements='str'), - subnet_mappings=dict(type='list', elements='dict'), - scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), - state=dict(choices=['present', 'absent'], type='str', default='present'), - tags=dict(type='dict', aliases=['resource_tags']), - wait_timeout=dict(type='int'), - wait=dict(type='bool'), - ip_address_type=dict(type='str', choices=['ipv4', 'dualstack']) - ) + argument_spec = dict( + cross_zone_load_balancing=dict(type="bool"), + deletion_protection=dict(type="bool"), + listeners=dict( + type="list", + elements="dict", + options=dict( + Protocol=dict(type="str", required=True), + Port=dict(type="int", required=True), + SslPolicy=dict(type="str"), + Certificates=dict(type="list", elements="dict"), + DefaultActions=dict(type="list", required=True, elements="dict"), + ), + ), + name=dict(required=True, type="str"), + purge_listeners=dict(default=True, type="bool"), + purge_tags=dict(default=True, type="bool"), + subnets=dict(type="list", elements="str"), + subnet_mappings=dict(type="list", elements="dict"), + scheme=dict(default="internet-facing", choices=["internet-facing", "internal"]), + state=dict(choices=["present", "absent"], type="str", default="present"), + tags=dict(type="dict", aliases=["resource_tags"]), + wait_timeout=dict(type="int"), + wait=dict(type="bool"), + ip_address_type=dict(type="str", 
choices=["ipv4", "dualstack"]), ) required_if = [ - ('state', 'present', ('subnets', 'subnet_mappings',), True) + ["state", "present", ["subnets", "subnet_mappings"], True], ] - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=required_if, - mutually_exclusive=[['subnets', 'subnet_mappings']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=required_if, + mutually_exclusive=[["subnets", "subnet_mappings"]], + ) # Check for subnets or subnet_mappings if state is present state = module.params.get("state") @@ -477,20 +481,20 @@ def main(): if listeners is not None: for listener in listeners: for key in listener.keys(): - protocols_list = ['TCP', 'TLS', 'UDP', 'TCP_UDP'] - if key == 'Protocol' and listener[key] not in protocols_list: + protocols_list = ["TCP", "TLS", "UDP", "TCP_UDP"] + if key == "Protocol" and listener[key] not in protocols_list: module.fail_json(msg="'Protocol' must be either " + ", ".join(protocols_list)) - connection = module.client('elbv2') - connection_ec2 = module.client('ec2') + connection = module.client("elbv2") + connection_ec2 = module.client("ec2") elb = NetworkLoadBalancer(connection, connection_ec2, module) - if state == 'present': + if state == "present": create_or_update_elb(elb) else: delete_elb(elb) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/elb_target.py b/ansible_collections/community/aws/plugins/modules/elb_target.py index cff46a62a..22074d496 100644 --- a/ansible_collections/community/aws/plugins/modules/elb_target.py +++ b/ansible_collections/community/aws/plugins/modules/elb_target.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: elb_target version_added: 1.0.0 short_description: Manage a target in a target group description: - - Used to register or deregister a target in a target group. -author: "Rob White (@wimnat)" + - Used to register or deregister a target in a target group. +author: + - "Rob White (@wimnat)" options: deregister_unused: description: @@ -68,16 +67,17 @@ options: required: true choices: [ 'present', 'absent' ] type: str -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 notes: - If you specified a port override when you registered a target, you must specify both the target ID and the port when you deregister it. -''' -EXAMPLES = ''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Register an IP address target to a target group @@ -105,14 +105,13 @@ EXAMPLES = ''' target_id: i-1234567 target_port: 8080 state: present +""" -''' - -RETURN = ''' - -''' +RETURN = r""" +""" -from time import time, sleep +from time import sleep +from time import time try: import botocore @@ -121,28 +120,28 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound']) + +@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=["TargetGroupNotFound"]) def describe_target_groups_with_backoff(connection, tg_name): return connection.describe_target_groups(Names=[tg_name]) def convert_tg_name_to_arn(connection, module, tg_name): - try: response = describe_target_groups_with_backoff(connection, tg_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe target group {0}".format(tg_name)) + module.fail_json_aws(e, msg=f"Unable to describe target group {tg_name}") - tg_arn = response['TargetGroups'][0]['TargetGroupArn'] + tg_arn = response["TargetGroups"][0]["TargetGroupArn"] return tg_arn -@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound']) +@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=["TargetGroupNotFound"]) def describe_targets_with_backoff(connection, tg_arn, target): if target is None: tg = [] @@ -153,7 +152,6 @@ def describe_targets_with_backoff(connection, tg_arn, target): def describe_targets(connection, module, tg_arn, target=None): - """ Describe targets in a target group @@ -165,12 +163,12 @@ def describe_targets(connection, module, tg_arn, target=None): """ try: - targets = describe_targets_with_backoff(connection, tg_arn, target)['TargetHealthDescriptions'] + targets = describe_targets_with_backoff(connection, tg_arn, target)["TargetHealthDescriptions"] if not targets: return {} return targets[0] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe target health for target {0}".format(target)) + module.fail_json_aws(e, msg=f"Unable to describe target health for target {target}") @AWSRetry.jittered_backoff(retries=10, delay=10) @@ -179,7 +177,6 @@ def register_target_with_backoff(connection, target_group_arn, target): def register_target(connection, module): - """ Registers a target to a target group @@ -201,26 +198,32 @@ def register_target(connection, module): target = dict(Id=target_id) if target_az: - target['AvailabilityZone'] = target_az + target["AvailabilityZone"] = target_az if target_port: - target['Port'] = target_port + target["Port"] = target_port target_description = describe_targets(connection, module, target_group_arn, target) - if 'Reason' in target_description['TargetHealth']: - if target_description['TargetHealth']['Reason'] == "Target.NotRegistered": + if "Reason" in target_description["TargetHealth"]: + if target_description["TargetHealth"]["Reason"] == "Target.NotRegistered": try: 
register_target_with_backoff(connection, target_group_arn, target)
                 changed = True
                 if target_status:
-                    target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout)
+                    target_status_check(
+                        connection, module, target_group_arn, target, target_status, target_status_timeout
+                    )
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg="Unable to deregister target {0}".format(target))
+            module.fail_json_aws(e, msg=f"Unable to register target {target}")

     # Get all targets for the target group
     target_descriptions = describe_targets(connection, module, target_group_arn)

-    module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn)
+    module.exit_json(
+        changed=changed,
+        target_health_descriptions=camel_dict_to_snake_dict(target_descriptions),
+        target_group_arn=target_group_arn,
+    )


 @AWSRetry.jittered_backoff(retries=10, delay=10)
@@ -229,7 +232,6 @@ def deregister_target_with_backoff(connection, target_group_arn, target):


 def deregister_target(connection, module):
-
     """
     Deregisters a target to a target group

@@ -251,18 +253,18 @@ def deregister_target(connection, module):
     target = dict(Id=target_id)

     if target_port:
-        target['Port'] = target_port
+        target["Port"] = target_port

     target_description = describe_targets(connection, module, target_group_arn, target)
-    current_target_state = target_description['TargetHealth']['State']
-    current_target_reason = target_description['TargetHealth'].get('Reason')
+    current_target_state = target_description["TargetHealth"]["State"]
+    current_target_reason = target_description["TargetHealth"].get("Reason")

     needs_deregister = False

-    if deregister_unused and current_target_state == 'unused':
-        if current_target_reason != 'Target.NotRegistered':
+    if deregister_unused and current_target_state == "unused":
+        if current_target_reason != "Target.NotRegistered":
             needs_deregister = True
-    elif current_target_state not in ['unused', 'draining']:
+    elif current_target_state not in ["unused", "draining"]:
         needs_deregister = True

     if needs_deregister:
@@ -270,11 +272,13 @@ def deregister_target(connection, module):
             deregister_target_with_backoff(connection, target_group_arn, target)
             changed = True
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json(msg="Unable to deregister target {0}".format(target))
+            module.fail_json(msg=f"Unable to deregister target {target}")
     else:
-        if current_target_reason != 'Target.NotRegistered' and current_target_state != 'draining':
-            module.warn(warning="Your specified target has an 'unused' state but is still registered to the target group. " +
-                        "To force deregistration use the 'deregister_unused' option.")
+        if current_target_reason != "Target.NotRegistered" and current_target_state != "draining":
+            module.warn(
+                warning="Your specified target has an 'unused' state but is still registered to the target group. "
+                + "To force deregistration use the 'deregister_unused' option."
+ ) if target_status: target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout) @@ -282,53 +286,60 @@ def deregister_target(connection, module): # Get all targets for the target group target_descriptions = describe_targets(connection, module, target_group_arn) - module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn) + module.exit_json( + changed=changed, + target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), + target_group_arn=target_group_arn, + ) def target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout): reached_state = False timeout = target_status_timeout + time() while time() < timeout: - health_state = describe_targets(connection, module, target_group_arn, target)['TargetHealth']['State'] + health_state = describe_targets(connection, module, target_group_arn, target)["TargetHealth"]["State"] if health_state == target_status: reached_state = True break sleep(1) if not reached_state: - module.fail_json(msg='Status check timeout of {0} exceeded, last status was {1}: '.format(target_status_timeout, health_state)) + module.fail_json( + msg=f"Status check timeout of {target_status_timeout} exceeded, last status was {health_state}: " + ) def main(): - argument_spec = dict( - deregister_unused=dict(type='bool', default=False), - target_az=dict(type='str'), - target_group_arn=dict(type='str'), - target_group_name=dict(type='str'), - target_id=dict(type='str', required=True), - target_port=dict(type='int'), - target_status=dict(choices=['initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable'], type='str'), - target_status_timeout=dict(type='int', default=60), - state=dict(required=True, choices=['present', 'absent'], type='str'), + deregister_unused=dict(type="bool", default=False), + target_az=dict(type="str"), + target_group_arn=dict(type="str"), + target_group_name=dict(type="str"), + target_id=dict(type="str", required=True), + target_port=dict(type="int"), + target_status=dict( + choices=["initial", "healthy", "unhealthy", "unused", "draining", "unavailable"], type="str" + ), + target_status_timeout=dict(type="int", default=60), + state=dict(required=True, choices=["present", "absent"], type="str"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['target_group_arn', 'target_group_name']], + mutually_exclusive=[["target_group_arn", "target_group_name"]], ) try: - connection = module.client('elbv2') + connection = module.client("elbv2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") state = module.params.get("state") - if state == 'present': + if state == "present": register_target(connection, module) else: deregister_target(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/elb_target_group.py b/ansible_collections/community/aws/plugins/modules/elb_target_group.py index 45a6e7ae9..71a859ead 100644 --- a/ansible_collections/community/aws/plugins/modules/elb_target_group.py +++ b/ansible_collections/community/aws/plugins/modules/elb_target_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elb_target_group version_added: 1.0.0 @@ -204,17 +202,18 @@ options: - The time to wait for the target group. default: 200 type: int -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - - amazon.aws.tags notes: - Once a target group has been created, only its health check can then be modified using subsequent calls -''' -EXAMPLES = r''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" + +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create a target group with a default health check @@ -271,7 +270,7 @@ EXAMPLES = r''' Port: 80 state: present wait_timeout: 200 - wait: True + wait: true - name: Create a target group with IP address targets community.aws.elb_target_group: @@ -291,7 +290,7 @@ EXAMPLES = r''' Port: 80 state: present wait_timeout: 200 - wait: True + wait: true # Using lambda as targets require that the target group # itself is allow to invoke the lambda function. @@ -304,7 +303,7 @@ EXAMPLES = r''' name: my-lambda-targetgroup target_type: lambda state: present - modify_targets: False + modify_targets: false register: out - name: second, allow invoke of the lambda @@ -322,11 +321,10 @@ EXAMPLES = r''' target_type: lambda state: present targets: - - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function - -''' + - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function +""" -RETURN = r''' +RETURN = r""" deregistration_delay_timeout_seconds: description: The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. 
returned: when state present @@ -437,7 +435,7 @@ vpc_id: returned: when state present type: str sample: vpc-0123456 -''' +""" import time @@ -448,56 +446,64 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_tg_attributes(connection, module, tg_arn): try: _attributes = connection.describe_target_group_attributes(TargetGroupArn=tg_arn, aws_retry=True) - tg_attributes = boto3_tag_list_to_ansible_dict(_attributes['Attributes']) + tg_attributes = boto3_tag_list_to_ansible_dict(_attributes["Attributes"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group attributes") # Replace '.' 
with '_' in attribute key names to make it more Ansible friendly - return dict((k.replace('.', '_'), v) for k, v in tg_attributes.items()) + return dict((k.replace(".", "_"), v) for k, v in tg_attributes.items()) def get_target_group_tags(connection, module, target_group_arn): try: _tags = connection.describe_tags(ResourceArns=[target_group_arn], aws_retry=True) - return _tags['TagDescriptions'][0]['Tags'] + return _tags["TagDescriptions"][0]["Tags"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group tags") def get_target_group(connection, module, retry_missing=False): - extra_codes = ['TargetGroupNotFound'] if retry_missing else [] + extra_codes = ["TargetGroupNotFound"] if retry_missing else [] try: - target_group_paginator = connection.get_paginator('describe_target_groups').paginate(Names=[module.params.get("name")]) + target_group_paginator = connection.get_paginator("describe_target_groups").paginate( + Names=[module.params.get("name")] + ) jittered_retry = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=extra_codes) result = jittered_retry(target_group_paginator.build_full_result)() - except is_boto3_error_code('TargetGroupNotFound'): + except is_boto3_error_code("TargetGroupNotFound"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get target group") - return result['TargetGroups'][0] + return result["TargetGroups"][0] def wait_for_status(connection, module, target_group_arn, targets, status): polling_increment_secs = 5 - max_retries = (module.params.get('wait_timeout') // polling_increment_secs) + max_retries = module.params.get("wait_timeout") // polling_increment_secs status_achieved = False for x in range(0, max_retries): try: - response = connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True) - if response['TargetHealthDescriptions'][0]['TargetHealth']['State'] == status: + response = connection.describe_target_health( + TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True + ) + if response["TargetHealthDescriptions"][0]["TargetHealth"]["State"] == status: status_achieved = True break else: @@ -527,156 +533,186 @@ def create_or_update_attributes(connection, module, target_group, new_target_gro update_attributes = [] # Get current attributes - current_tg_attributes = get_tg_attributes(connection, module, target_group['TargetGroupArn']) + current_tg_attributes = get_tg_attributes(connection, module, target_group["TargetGroupArn"]) if deregistration_delay_timeout is not None: - if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']: - update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': str(deregistration_delay_timeout)}) + if str(deregistration_delay_timeout) != current_tg_attributes["deregistration_delay_timeout_seconds"]: + update_attributes.append( + {"Key": "deregistration_delay.timeout_seconds", "Value": str(deregistration_delay_timeout)} + ) if deregistration_connection_termination is not None: - if deregistration_connection_termination and current_tg_attributes.get('deregistration_delay_connection_termination_enabled') != "true": - update_attributes.append({'Key': 
'deregistration_delay.connection_termination.enabled', 'Value': 'true'}) + if ( + deregistration_connection_termination + and current_tg_attributes.get("deregistration_delay_connection_termination_enabled") != "true" + ): + update_attributes.append({"Key": "deregistration_delay.connection_termination.enabled", "Value": "true"}) if stickiness_enabled is not None: - if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true": - update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'}) + if stickiness_enabled and current_tg_attributes["stickiness_enabled"] != "true": + update_attributes.append({"Key": "stickiness.enabled", "Value": "true"}) if stickiness_lb_cookie_duration is not None: - if str(stickiness_lb_cookie_duration) != current_tg_attributes['stickiness_lb_cookie_duration_seconds']: - update_attributes.append({'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': str(stickiness_lb_cookie_duration)}) + if str(stickiness_lb_cookie_duration) != current_tg_attributes["stickiness_lb_cookie_duration_seconds"]: + update_attributes.append( + {"Key": "stickiness.lb_cookie.duration_seconds", "Value": str(stickiness_lb_cookie_duration)} + ) if stickiness_type is not None: - if stickiness_type != current_tg_attributes.get('stickiness_type'): - update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type}) + if stickiness_type != current_tg_attributes.get("stickiness_type"): + update_attributes.append({"Key": "stickiness.type", "Value": stickiness_type}) if stickiness_app_cookie_name is not None: - if stickiness_app_cookie_name != current_tg_attributes.get('stickiness_app_cookie_name'): - update_attributes.append({'Key': 'stickiness.app_cookie.cookie_name', 'Value': str(stickiness_app_cookie_name)}) + if stickiness_app_cookie_name != current_tg_attributes.get("stickiness_app_cookie_name"): + update_attributes.append( + {"Key": "stickiness.app_cookie.cookie_name", "Value": str(stickiness_app_cookie_name)} + ) if stickiness_app_cookie_duration is not None: - if str(stickiness_app_cookie_duration) != current_tg_attributes['stickiness_app_cookie_duration_seconds']: - update_attributes.append({'Key': 'stickiness.app_cookie.duration_seconds', 'Value': str(stickiness_app_cookie_duration)}) + if str(stickiness_app_cookie_duration) != current_tg_attributes["stickiness_app_cookie_duration_seconds"]: + update_attributes.append( + {"Key": "stickiness.app_cookie.duration_seconds", "Value": str(stickiness_app_cookie_duration)} + ) if preserve_client_ip_enabled is not None: - if target_type not in ('udp', 'tcp_udp'): - if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get('preserve_client_ip_enabled'): - update_attributes.append({'Key': 'preserve_client_ip.enabled', 'Value': str(preserve_client_ip_enabled).lower()}) + if target_type not in ("udp", "tcp_udp"): + if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get("preserve_client_ip_enabled"): + update_attributes.append( + {"Key": "preserve_client_ip.enabled", "Value": str(preserve_client_ip_enabled).lower()} + ) if proxy_protocol_v2_enabled is not None: - if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get('proxy_protocol_v2_enabled'): - update_attributes.append({'Key': 'proxy_protocol_v2.enabled', 'Value': str(proxy_protocol_v2_enabled).lower()}) + if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get("proxy_protocol_v2_enabled"): + update_attributes.append( + {"Key": "proxy_protocol_v2.enabled", "Value": 
str(proxy_protocol_v2_enabled).lower()} + ) if load_balancing_algorithm_type is not None: - if str(load_balancing_algorithm_type) != current_tg_attributes['load_balancing_algorithm_type']: - update_attributes.append({'Key': 'load_balancing.algorithm.type', 'Value': str(load_balancing_algorithm_type)}) + if str(load_balancing_algorithm_type) != current_tg_attributes["load_balancing_algorithm_type"]: + update_attributes.append( + {"Key": "load_balancing.algorithm.type", "Value": str(load_balancing_algorithm_type)} + ) if update_attributes: try: - connection.modify_target_group_attributes(TargetGroupArn=target_group['TargetGroupArn'], Attributes=update_attributes, aws_retry=True) + connection.modify_target_group_attributes( + TargetGroupArn=target_group["TargetGroupArn"], Attributes=update_attributes, aws_retry=True + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # Something went wrong setting attributes. If this target group was created during this task, delete it to leave a consistent state if new_target_group: - connection.delete_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True) + connection.delete_target_group(TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True) module.fail_json_aws(e, msg="Couldn't delete target group") return changed def create_or_update_target_group(connection, module): - changed = False new_target_group = False params = dict() target_type = module.params.get("target_type") - params['Name'] = module.params.get("name") - params['TargetType'] = target_type + params["Name"] = module.params.get("name") + params["TargetType"] = target_type if target_type != "lambda": - params['Protocol'] = module.params.get("protocol").upper() - if module.params.get('protocol_version') is not None: - params['ProtocolVersion'] = module.params.get('protocol_version') - params['Port'] = module.params.get("port") - params['VpcId'] = module.params.get("vpc_id") + params["Protocol"] = module.params.get("protocol").upper() + if module.params.get("protocol_version") is not None: + params["ProtocolVersion"] = module.params.get("protocol_version") + params["Port"] = module.params.get("port") + params["VpcId"] = module.params.get("vpc_id") tags = module.params.get("tags") purge_tags = module.params.get("purge_tags") health_option_keys = [ - "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout", - "healthy_threshold_count", "unhealthy_threshold_count", "successful_response_codes" + "health_check_path", + "health_check_protocol", + "health_check_interval", + "health_check_timeout", + "healthy_threshold_count", + "unhealthy_threshold_count", + "successful_response_codes", ] health_options = any(module.params[health_option_key] is not None for health_option_key in health_option_keys) # Set health check if anything set if health_options: - if module.params.get("health_check_protocol") is not None: - params['HealthCheckProtocol'] = module.params.get("health_check_protocol").upper() + params["HealthCheckProtocol"] = module.params.get("health_check_protocol").upper() if module.params.get("health_check_port") is not None: - params['HealthCheckPort'] = module.params.get("health_check_port") + params["HealthCheckPort"] = module.params.get("health_check_port") if module.params.get("health_check_interval") is not None: - params['HealthCheckIntervalSeconds'] = module.params.get("health_check_interval") + params["HealthCheckIntervalSeconds"] = module.params.get("health_check_interval") if 
module.params.get("health_check_timeout") is not None: - params['HealthCheckTimeoutSeconds'] = module.params.get("health_check_timeout") + params["HealthCheckTimeoutSeconds"] = module.params.get("health_check_timeout") if module.params.get("healthy_threshold_count") is not None: - params['HealthyThresholdCount'] = module.params.get("healthy_threshold_count") + params["HealthyThresholdCount"] = module.params.get("healthy_threshold_count") if module.params.get("unhealthy_threshold_count") is not None: - params['UnhealthyThresholdCount'] = module.params.get("unhealthy_threshold_count") + params["UnhealthyThresholdCount"] = module.params.get("unhealthy_threshold_count") # Only need to check response code and path for http(s) health checks protocol = module.params.get("health_check_protocol") - if protocol is not None and protocol.upper() in ['HTTP', 'HTTPS']: - + if protocol is not None and protocol.upper() in ["HTTP", "HTTPS"]: if module.params.get("health_check_path") is not None: - params['HealthCheckPath'] = module.params.get("health_check_path") + params["HealthCheckPath"] = module.params.get("health_check_path") if module.params.get("successful_response_codes") is not None: - params['Matcher'] = {} - code_key = 'HttpCode' - protocol_version = module.params.get('protocol_version') + params["Matcher"] = {} + code_key = "HttpCode" + protocol_version = module.params.get("protocol_version") if protocol_version is not None and protocol_version.upper() == "GRPC": - code_key = 'GrpcCode' - params['Matcher'][code_key] = module.params.get("successful_response_codes") + code_key = "GrpcCode" + params["Matcher"][code_key] = module.params.get("successful_response_codes") # Get target group target_group = get_target_group(connection, module) if target_group: - diffs = [param for param in ('Port', 'Protocol', 'VpcId') - if target_group.get(param) != params.get(param)] + diffs = [param for param in ("Port", "Protocol", "VpcId") if target_group.get(param) != params.get(param)] if diffs: - module.fail_json(msg="Cannot modify %s parameter(s) for a target group" % - ", ".join(diffs)) + module.fail_json(msg=f"Cannot modify {', '.join(diffs)} parameter(s) for a target group") # Target group exists so check health check parameters match what has been passed health_check_params = dict() # Modify health check if anything set if health_options: - # Health check protocol - if 'HealthCheckProtocol' in params and target_group['HealthCheckProtocol'] != params['HealthCheckProtocol']: - health_check_params['HealthCheckProtocol'] = params['HealthCheckProtocol'] + if "HealthCheckProtocol" in params and target_group["HealthCheckProtocol"] != params["HealthCheckProtocol"]: + health_check_params["HealthCheckProtocol"] = params["HealthCheckProtocol"] # Health check port - if 'HealthCheckPort' in params and target_group['HealthCheckPort'] != params['HealthCheckPort']: - health_check_params['HealthCheckPort'] = params['HealthCheckPort'] + if "HealthCheckPort" in params and target_group["HealthCheckPort"] != params["HealthCheckPort"]: + health_check_params["HealthCheckPort"] = params["HealthCheckPort"] # Health check interval - if 'HealthCheckIntervalSeconds' in params and target_group['HealthCheckIntervalSeconds'] != params['HealthCheckIntervalSeconds']: - health_check_params['HealthCheckIntervalSeconds'] = params['HealthCheckIntervalSeconds'] + if ( + "HealthCheckIntervalSeconds" in params + and target_group["HealthCheckIntervalSeconds"] != params["HealthCheckIntervalSeconds"] + ): + 
health_check_params["HealthCheckIntervalSeconds"] = params["HealthCheckIntervalSeconds"] # Health check timeout - if 'HealthCheckTimeoutSeconds' in params and target_group['HealthCheckTimeoutSeconds'] != params['HealthCheckTimeoutSeconds']: - health_check_params['HealthCheckTimeoutSeconds'] = params['HealthCheckTimeoutSeconds'] + if ( + "HealthCheckTimeoutSeconds" in params + and target_group["HealthCheckTimeoutSeconds"] != params["HealthCheckTimeoutSeconds"] + ): + health_check_params["HealthCheckTimeoutSeconds"] = params["HealthCheckTimeoutSeconds"] # Healthy threshold - if 'HealthyThresholdCount' in params and target_group['HealthyThresholdCount'] != params['HealthyThresholdCount']: - health_check_params['HealthyThresholdCount'] = params['HealthyThresholdCount'] + if ( + "HealthyThresholdCount" in params + and target_group["HealthyThresholdCount"] != params["HealthyThresholdCount"] + ): + health_check_params["HealthyThresholdCount"] = params["HealthyThresholdCount"] # Unhealthy threshold - if 'UnhealthyThresholdCount' in params and target_group['UnhealthyThresholdCount'] != params['UnhealthyThresholdCount']: - health_check_params['UnhealthyThresholdCount'] = params['UnhealthyThresholdCount'] + if ( + "UnhealthyThresholdCount" in params + and target_group["UnhealthyThresholdCount"] != params["UnhealthyThresholdCount"] + ): + health_check_params["UnhealthyThresholdCount"] = params["UnhealthyThresholdCount"] # Only need to check response code and path for http(s) health checks - if target_group['HealthCheckProtocol'] in ['HTTP', 'HTTPS']: + if target_group["HealthCheckProtocol"] in ["HTTP", "HTTPS"]: # Health check path - if 'HealthCheckPath' in params and target_group['HealthCheckPath'] != params['HealthCheckPath']: - health_check_params['HealthCheckPath'] = params['HealthCheckPath'] + if "HealthCheckPath" in params and target_group["HealthCheckPath"] != params["HealthCheckPath"]: + health_check_params["HealthCheckPath"] = params["HealthCheckPath"] # Matcher (successful response codes) # TODO: required and here? 
@@ -687,12 +723,14 @@ def create_or_update_target_group(connection, module): current_matcher_list = target_group["Matcher"][code_key].split(",") requested_matcher_list = params["Matcher"][code_key].split(",") if set(current_matcher_list) != set(requested_matcher_list): - health_check_params['Matcher'] = {} - health_check_params['Matcher'][code_key] = ','.join(requested_matcher_list) + health_check_params["Matcher"] = {} + health_check_params["Matcher"][code_key] = ",".join(requested_matcher_list) try: if health_check_params: - connection.modify_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True, **health_check_params) + connection.modify_target_group( + TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True, **health_check_params + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't update target group") @@ -703,27 +741,27 @@ def create_or_update_target_group(connection, module): # describe_target_health seems to be the only way to get them try: current_targets = connection.describe_target_health( - TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True) + TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group health") if module.params.get("targets"): - if target_type != "lambda": - params['Targets'] = module.params.get("targets") + params["Targets"] = module.params.get("targets") # Correct type of target ports - for target in params['Targets']: - target['Port'] = int(target.get('Port', module.params.get('port'))) + for target in params["Targets"]: + target["Port"] = int(target.get("Port", module.params.get("port"))) current_instance_ids = [] - for instance in current_targets['TargetHealthDescriptions']: - current_instance_ids.append(instance['Target']['Id']) + for instance in current_targets["TargetHealthDescriptions"]: + current_instance_ids.append(instance["Target"]["Id"]) new_instance_ids = [] - for instance in params['Targets']: - new_instance_ids.append(instance['Id']) + for instance in params["Targets"]: + new_instance_ids.append(instance["Id"]) add_instances = set(new_instance_ids) - set(current_instance_ids) @@ -738,37 +776,49 @@ def create_or_update_target_group(connection, module): changed = True try: - connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_add, aws_retry=True) + connection.register_targets( + TargetGroupArn=target_group["TargetGroupArn"], Targets=instances_to_add, aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't register targets") if module.params.get("wait"): status_achieved, registered_instances = wait_for_status( - connection, module, target_group['TargetGroupArn'], instances_to_add, 'healthy') + connection, module, target_group["TargetGroupArn"], instances_to_add, "healthy" + ) if not status_achieved: module.fail_json( - msg='Error waiting for target registration to be healthy - please check the AWS console') + msg="Error waiting for target registration to be healthy - please check the AWS console" + ) remove_instances = set(current_instance_ids) - set(new_instance_ids) if remove_instances: instances_to_remove = [] - for target in current_targets['TargetHealthDescriptions']: - if target['Target']['Id'] in remove_instances: - instances_to_remove.append({'Id': 
target['Target']['Id'], 'Port': target['Target']['Port']}) + for target in current_targets["TargetHealthDescriptions"]: + if target["Target"]["Id"] in remove_instances: + instances_to_remove.append( + {"Id": target["Target"]["Id"], "Port": target["Target"]["Port"]} + ) changed = True try: - connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) + connection.deregister_targets( + TargetGroupArn=target_group["TargetGroupArn"], + Targets=instances_to_remove, + aws_retry=True, + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove targets") if module.params.get("wait"): status_achieved, registered_instances = wait_for_status( - connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused') + connection, module, target_group["TargetGroupArn"], instances_to_remove, "unused" + ) if not status_achieved: module.fail_json( - msg='Error waiting for target deregistration - please check the AWS console') + msg="Error waiting for target deregistration - please check the AWS console" + ) # register lambda target else: @@ -786,40 +836,40 @@ def create_or_update_target_group(connection, module): if changed: if target.get("Id"): response = connection.register_targets( - TargetGroupArn=target_group['TargetGroupArn'], - Targets=[ - { - "Id": target['Id'] - } - ], - aws_retry=True + TargetGroupArn=target_group["TargetGroupArn"], + Targets=[{"Id": target["Id"]}], + aws_retry=True, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't register targets") + module.fail_json_aws(e, msg="Couldn't register targets") else: if target_type != "lambda": - - current_instances = current_targets['TargetHealthDescriptions'] + current_instances = current_targets["TargetHealthDescriptions"] if current_instances: instances_to_remove = [] - for target in current_targets['TargetHealthDescriptions']: - instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']}) + for target in current_targets["TargetHealthDescriptions"]: + instances_to_remove.append({"Id": target["Target"]["Id"], "Port": target["Target"]["Port"]}) changed = True try: - connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) + connection.deregister_targets( + TargetGroupArn=target_group["TargetGroupArn"], + Targets=instances_to_remove, + aws_retry=True, + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove targets") if module.params.get("wait"): status_achieved, registered_instances = wait_for_status( - connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused') + connection, module, target_group["TargetGroupArn"], instances_to_remove, "unused" + ) if not status_achieved: module.fail_json( - msg='Error waiting for target deregistration - please check the AWS console') + msg="Error waiting for target deregistration - please check the AWS console" + ) # remove lambda targets else: @@ -830,7 +880,10 @@ def create_or_update_target_group(connection, module): target_to_remove = current_targets["TargetHealthDescriptions"][0]["Target"]["Id"] if changed: connection.deregister_targets( - TargetGroupArn=target_group['TargetGroupArn'], Targets=[{"Id": target_to_remove}], aws_retry=True) + TargetGroupArn=target_group["TargetGroupArn"], + 
Targets=[{"Id": target_to_remove}], + aws_retry=True, + ) else: try: connection.create_target_group(aws_retry=True, **params) @@ -843,33 +896,32 @@ def create_or_update_target_group(connection, module): if module.params.get("targets"): if target_type != "lambda": - params['Targets'] = module.params.get("targets") + params["Targets"] = module.params.get("targets") try: - connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=params['Targets'], aws_retry=True) + connection.register_targets( + TargetGroupArn=target_group["TargetGroupArn"], Targets=params["Targets"], aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't register targets") if module.params.get("wait"): - status_achieved, registered_instances = wait_for_status(connection, module, target_group['TargetGroupArn'], params['Targets'], 'healthy') + status_achieved, registered_instances = wait_for_status( + connection, module, target_group["TargetGroupArn"], params["Targets"], "healthy" + ) if not status_achieved: - module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console') + module.fail_json( + msg="Error waiting for target registration to be healthy - please check the AWS console" + ) else: try: target = module.params.get("targets")[0] response = connection.register_targets( - TargetGroupArn=target_group['TargetGroupArn'], - Targets=[ - { - "Id": target["Id"] - } - ], - aws_retry=True + TargetGroupArn=target_group["TargetGroupArn"], Targets=[{"Id": target["Id"]}], aws_retry=True ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't register targets") + module.fail_json_aws(e, msg="Couldn't register targets") attributes_update = create_or_update_attributes(connection, module, target_group, new_target_group) @@ -879,13 +931,17 @@ def create_or_update_target_group(connection, module): # Tags - only need to play with tags if tags parameter has been set to something if tags is not None: # Get tags - current_tags = get_target_group_tags(connection, module, target_group['TargetGroupArn']) + current_tags = get_target_group_tags(connection, module, target_group["TargetGroupArn"]) # Delete necessary tags - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags) + tags_need_modify, tags_to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags + ) if tags_to_delete: try: - connection.remove_tags(ResourceArns=[target_group['TargetGroupArn']], TagKeys=tags_to_delete, aws_retry=True) + connection.remove_tags( + ResourceArns=[target_group["TargetGroupArn"]], TagKeys=tags_to_delete, aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete tags from target group") changed = True @@ -893,7 +949,11 @@ def create_or_update_target_group(connection, module): # Add/update tags if tags_need_modify: try: - connection.add_tags(ResourceArns=[target_group['TargetGroupArn']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), aws_retry=True) + connection.add_tags( + ResourceArns=[target_group["TargetGroupArn"]], + Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), + aws_retry=True, + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't add tags to target 
group") changed = True @@ -902,12 +962,14 @@ def create_or_update_target_group(connection, module): target_group = get_target_group(connection, module) # Get the target group attributes again - target_group.update(get_tg_attributes(connection, module, target_group['TargetGroupArn'])) + target_group.update(get_tg_attributes(connection, module, target_group["TargetGroupArn"])) # Convert target_group to snake_case snaked_tg = camel_dict_to_snake_dict(target_group) - snaked_tg['tags'] = boto3_tag_list_to_ansible_dict(get_target_group_tags(connection, module, target_group['TargetGroupArn'])) + snaked_tg["tags"] = boto3_tag_list_to_ansible_dict( + get_target_group_tags(connection, module, target_group["TargetGroupArn"]) + ) module.exit_json(changed=changed, **snaked_tg) @@ -918,7 +980,7 @@ def delete_target_group(connection, module): if tg: try: - connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True) + connection.delete_target_group(TargetGroupArn=tg["TargetGroupArn"], aws_retry=True) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete target group") @@ -927,66 +989,69 @@ def delete_target_group(connection, module): def main(): - protocols_list = ['http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', - 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP'] + protocols_list = ["http", "https", "tcp", "tls", "udp", "tcp_udp", "HTTP", "HTTPS", "TCP", "TLS", "UDP", "TCP_UDP"] argument_spec = dict( - deregistration_delay_timeout=dict(type='int'), - deregistration_connection_termination=dict(type='bool', default=False), + deregistration_delay_timeout=dict(type="int"), + deregistration_connection_termination=dict(type="bool", default=False), health_check_protocol=dict(choices=protocols_list), health_check_port=dict(), health_check_path=dict(), - health_check_interval=dict(type='int'), - health_check_timeout=dict(type='int'), - healthy_threshold_count=dict(type='int'), - modify_targets=dict(default=True, type='bool'), + health_check_interval=dict(type="int"), + health_check_timeout=dict(type="int"), + healthy_threshold_count=dict(type="int"), + modify_targets=dict(default=True, type="bool"), name=dict(required=True), - port=dict(type='int'), + port=dict(type="int"), protocol=dict(choices=protocols_list), - protocol_version=dict(type='str', choices=['GRPC', 'HTTP1', 'HTTP2']), - purge_tags=dict(default=True, type='bool'), - stickiness_enabled=dict(type='bool'), + protocol_version=dict(type="str", choices=["GRPC", "HTTP1", "HTTP2"]), + purge_tags=dict(default=True, type="bool"), + stickiness_enabled=dict(type="bool"), stickiness_type=dict(), - stickiness_lb_cookie_duration=dict(type='int'), - stickiness_app_cookie_duration=dict(type='int'), + stickiness_lb_cookie_duration=dict(type="int"), + stickiness_app_cookie_duration=dict(type="int"), stickiness_app_cookie_name=dict(), - load_balancing_algorithm_type=dict(type='str', choices=['round_robin', 'least_outstanding_requests']), - state=dict(required=True, choices=['present', 'absent']), + load_balancing_algorithm_type=dict(type="str", choices=["round_robin", "least_outstanding_requests"]), + state=dict(required=True, choices=["present", "absent"]), successful_response_codes=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - target_type=dict(choices=['instance', 'ip', 'lambda', 'alb']), - targets=dict(type='list', elements='dict'), - unhealthy_threshold_count=dict(type='int'), + tags=dict(type="dict", aliases=["resource_tags"]), + 
target_type=dict(choices=["instance", "ip", "lambda", "alb"]), + targets=dict(type="list", elements="dict"), + unhealthy_threshold_count=dict(type="int"), vpc_id=dict(), - preserve_client_ip_enabled=dict(type='bool'), - proxy_protocol_v2_enabled=dict(type='bool'), - wait_timeout=dict(type='int', default=200), - wait=dict(type='bool', default=False) + preserve_client_ip_enabled=dict(type="bool"), + proxy_protocol_v2_enabled=dict(type="bool"), + wait_timeout=dict(type="int", default=200), + wait=dict(type="bool", default=False), ) required_by = dict( - health_check_path=['health_check_protocol'], - successful_response_codes=['health_check_protocol'], + health_check_path=["health_check_protocol"], + successful_response_codes=["health_check_protocol"], ) required_if = [ - ['target_type', 'instance', ['protocol', 'port', 'vpc_id']], - ['target_type', 'ip', ['protocol', 'port', 'vpc_id']], - ['target_type', 'alb', ['protocol', 'port', 'vpc_id']], + ["target_type", "instance", ["protocol", "port", "vpc_id"]], + ["target_type", "ip", ["protocol", "port", "vpc_id"]], + ["target_type", "alb", ["protocol", "port", "vpc_id"]], ] module = AnsibleAWSModule(argument_spec=argument_spec, required_by=required_by, required_if=required_if) - if module.params.get('target_type') is None: - module.params['target_type'] = 'instance' + if module.params.get("target_type") is None: + module.params["target_type"] = "instance" - connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - if module.params.get('state') == 'present': - if module.params.get('protocol') in ['http', 'https', 'HTTP', 'HTTPS'] and module.params.get('deregistration_connection_termination', None): - module.fail_json(msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination") + if module.params.get("state") == "present": + if module.params.get("protocol") in ["http", "https", "HTTP", "HTTPS"] and module.params.get( + "deregistration_connection_termination", None + ): + module.fail_json( + msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination" + ) create_or_update_target_group(connection, module) else: delete_target_group(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py b/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py index 86cc03782..d0b013bfd 100644 --- a/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py +++ b/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elb_target_group_info version_added: 1.0.0 short_description: Gather information about ELB target groups in AWS description: - - Gather information about ELB target groups in AWS -author: Rob White (@wimnat) + - Gather information about ELB target groups in AWS +author: + - Rob White (@wimnat) options: load_balancer_arn: description: @@ -40,13 +39,12 @@ options: type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- 
amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all target groups @@ -61,10 +59,9 @@ EXAMPLES = r''' names: - tg1 - tg2 +""" -''' - -RETURN = r''' +RETURN = r""" target_groups: description: a list of target groups returned: always @@ -204,7 +201,7 @@ target_groups: returned: always type: str sample: vpc-0123456 -''' +""" try: import botocore @@ -213,47 +210,48 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule @AWSRetry.jittered_backoff(retries=10) def get_paginator(**kwargs): - paginator = client.get_paginator('describe_target_groups') + paginator = client.get_paginator("describe_target_groups") return paginator.paginate(**kwargs).build_full_result() def get_target_group_attributes(target_group_arn): - try: - target_group_attributes = boto3_tag_list_to_ansible_dict(client.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes']) + target_group_attributes = boto3_tag_list_to_ansible_dict( + client.describe_target_group_attributes(TargetGroupArn=target_group_arn)["Attributes"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe target group attributes") # Replace '.' 
with '_' in attribute key names to make it more Ansibley - return dict((k.replace('.', '_'), v) - for (k, v) in target_group_attributes.items()) + return dict((k.replace(".", "_"), v) for (k, v) in target_group_attributes.items()) def get_target_group_tags(target_group_arn): - try: - return boto3_tag_list_to_ansible_dict(client.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags']) + return boto3_tag_list_to_ansible_dict( + client.describe_tags(ResourceArns=[target_group_arn])["TagDescriptions"][0]["Tags"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe group tags") def get_target_group_targets_health(target_group_arn): - try: - return client.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions'] + return client.describe_target_health(TargetGroupArn=target_group_arn)["TargetHealthDescriptions"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get target health") def list_target_groups(): - load_balancer_arn = module.params.get("load_balancer_arn") target_group_arns = module.params.get("target_group_arns") names = module.params.get("names") @@ -268,24 +266,29 @@ def list_target_groups(): target_groups = get_paginator(TargetGroupArns=target_group_arns) if names: target_groups = get_paginator(Names=names) - except is_boto3_error_code('TargetGroupNotFound'): + except is_boto3_error_code("TargetGroupNotFound"): module.exit_json(target_groups=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to list target groups") # Get the attributes and tags for each target group - for target_group in target_groups['TargetGroups']: - target_group.update(get_target_group_attributes(target_group['TargetGroupArn'])) + for target_group in target_groups["TargetGroups"]: + target_group.update(get_target_group_attributes(target_group["TargetGroupArn"])) # Turn the boto3 result in to ansible_friendly_snaked_names - snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']] + snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups["TargetGroups"]] # Get tags for each target group for snaked_target_group in snaked_target_groups: - snaked_target_group['tags'] = get_target_group_tags(snaked_target_group['target_group_arn']) + snaked_target_group["tags"] = get_target_group_tags(snaked_target_group["target_group_arn"]) if collect_targets_health: - snaked_target_group['targets_health_description'] = [camel_dict_to_snake_dict( - target) for target in get_target_group_targets_health(snaked_target_group['target_group_arn'])] + snaked_target_group["targets_health_description"] = [ + camel_dict_to_snake_dict(target) + for target in get_target_group_targets_health(snaked_target_group["target_group_arn"]) + ] module.exit_json(target_groups=snaked_target_groups) @@ -295,25 +298,25 @@ def main(): global client argument_spec = dict( - load_balancer_arn=dict(type='str'), - target_group_arns=dict(type='list', elements='str'), - names=dict(type='list', elements='str'), - collect_targets_health=dict(default=False, type='bool', required=False), + load_balancer_arn=dict(type="str"), + 
target_group_arns=dict(type="list", elements="str"), + names=dict(type="list", elements="str"), + collect_targets_health=dict(default=False, type="bool", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']], + mutually_exclusive=[["load_balancer_arn", "target_group_arns", "names"]], supports_check_mode=True, ) try: - client = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + client = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_target_groups() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/elb_target_info.py b/ansible_collections/community/aws/plugins/modules/elb_target_info.py index 4f91ac7f3..ad0b3c74b 100644 --- a/ansible_collections/community/aws/plugins/modules/elb_target_info.py +++ b/ansible_collections/community/aws/plugins/modules/elb_target_info.py @@ -1,10 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Yaakov Kuperman <ykuperman@gmail.com> # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: elb_target_info version_added: 1.0.0 @@ -12,8 +12,8 @@ short_description: Gathers which target groups a target is associated with. description: - This module will search through every target group in a region to find which ones have registered a given instance ID or IP. - -author: "Yaakov Kuperman (@yaakov-github)" +author: + - "Yaakov Kuperman (@yaakov-github)" options: instance_id: description: @@ -25,109 +25,108 @@ options: - Whether or not to get target groups not used by any load balancers. type: bool default: true -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = """ +EXAMPLES = r""" # practical use case - dynamically de-registering and re-registering nodes - - name: Get EC2 Metadata - amazon.aws.ec2_metadata_facts: - - - name: Get initial list of target groups - delegate_to: localhost - community.aws.elb_target_info: - instance_id: "{{ ansible_ec2_instance_id }}" - region: "{{ ansible_ec2_placement_region }}" - register: target_info - - - name: save fact for later - ansible.builtin.set_fact: - original_tgs: "{{ target_info.instance_target_groups }}" - - - name: Deregister instance from all target groups - delegate_to: localhost - community.aws.elb_target: - target_group_arn: "{{ item.0.target_group_arn }}" - target_port: "{{ item.1.target_port }}" - target_az: "{{ item.1.target_az }}" - target_id: "{{ item.1.target_id }}" - state: absent - target_status: "draining" - region: "{{ ansible_ec2_placement_region }}" - with_subelements: - - "{{ original_tgs }}" - - "targets" - - # This avoids having to wait for 'elb_target' to serially deregister each - # target group. An alternative would be to run all of the 'elb_target' - # tasks async and wait for them to finish. 
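Editor's note, not part of the patch: the surrounding example (both the old text being removed and its re-indented replacement below) polls elb_target_info with until/retries/delay until the instance has left every target group. For readers who want the same loop outside Ansible, here is a minimal sketch written directly against boto3; the client, target group ARN, and instance ID are placeholders, and error handling is omitted:

import time

import boto3

def wait_until_deregistered(elbv2, target_group_arn, instance_id, retries=60, delay=10):
    # Poll describe_target_health until the instance no longer appears
    # among the target group's registered targets.
    for _ in range(retries):
        health = elbv2.describe_target_health(TargetGroupArn=target_group_arn)
        if all(d["Target"]["Id"] != instance_id for d in health["TargetHealthDescriptions"]):
            return True
        time.sleep(delay)
    return False

# elbv2 = boto3.client("elbv2", region_name="us-east-1")  # placeholder region
# wait_until_deregistered(elbv2, "arn:aws:elasticloadbalancing:...", "i-0123456789abcdef0")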
- - - name: wait for all targets to deregister simultaneously - delegate_to: localhost - community.aws.elb_target_info: - get_unused_target_groups: false - instance_id: "{{ ansible_ec2_instance_id }}" - region: "{{ ansible_ec2_placement_region }}" - register: target_info - until: (target_info.instance_target_groups | length) == 0 - retries: 60 - delay: 10 - - - name: reregister in elbv2s - community.aws.elb_target: - region: "{{ ansible_ec2_placement_region }}" - target_group_arn: "{{ item.0.target_group_arn }}" - target_port: "{{ item.1.target_port }}" - target_az: "{{ item.1.target_az }}" - target_id: "{{ item.1.target_id }}" - state: present - target_status: "initial" - with_subelements: - - "{{ original_tgs }}" - - "targets" - - # wait until all groups associated with this instance are 'healthy' or - # 'unused' - - name: wait for registration - community.aws.elb_target_info: - get_unused_target_groups: false - instance_id: "{{ ansible_ec2_instance_id }}" - region: "{{ ansible_ec2_placement_region }}" - register: target_info - until: (target_info.instance_target_groups | - map(attribute='targets') | - flatten | - map(attribute='target_health') | - rejectattr('state', 'equalto', 'healthy') | - rejectattr('state', 'equalto', 'unused') | - list | - length) == 0 - retries: 61 - delay: 10 +- name: Get EC2 Metadata + amazon.aws.ec2_metadata_facts: + +- name: Get initial list of target groups + delegate_to: localhost + community.aws.elb_target_info: + instance_id: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + register: target_info + +- name: save fact for later + ansible.builtin.set_fact: + original_tgs: "{{ target_info.instance_target_groups }}" + +- name: Deregister instance from all target groups + delegate_to: localhost + community.aws.elb_target: + target_group_arn: "{{ item.0.target_group_arn }}" + target_port: "{{ item.1.target_port }}" + target_az: "{{ item.1.target_az }}" + target_id: "{{ item.1.target_id }}" + state: absent + target_status: "draining" + region: "{{ ansible_ec2_placement_region }}" + with_subelements: + - "{{ original_tgs }}" + - "targets" + + # This avoids having to wait for 'elb_target' to serially deregister each + # target group. An alternative would be to run all of the 'elb_target' + # tasks async and wait for them to finish. 
+ +- name: wait for all targets to deregister simultaneously + delegate_to: localhost + community.aws.elb_target_info: + get_unused_target_groups: false + instance_id: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + register: target_info + until: (target_info.instance_target_groups | length) == 0 + retries: 60 + delay: 10 + +- name: reregister in elbv2s + community.aws.elb_target: + region: "{{ ansible_ec2_placement_region }}" + target_group_arn: "{{ item.0.target_group_arn }}" + target_port: "{{ item.1.target_port }}" + target_az: "{{ item.1.target_az }}" + target_id: "{{ item.1.target_id }}" + state: present + target_status: "initial" + with_subelements: + - "{{ original_tgs }}" + - "targets" + +# wait until all groups associated with this instance are 'healthy' or +# 'unused' +- name: wait for registration + community.aws.elb_target_info: + get_unused_target_groups: false + instance_id: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + register: target_info + until: (target_info.instance_target_groups | + map(attribute='targets') | + flatten | + map(attribute='target_health') | + rejectattr('state', 'equalto', 'healthy') | + rejectattr('state', 'equalto', 'unused') | + list | + length) == 0 + retries: 61 + delay: 10 # using the target groups to generate AWS CLI commands to reregister the # instance - useful in case the playbook fails mid-run and manual # rollback is required - - name: "reregistration commands: ELBv2s" - ansible.builtin.debug: - msg: > - aws --region {{ansible_ec2_placement_region}} elbv2 - register-targets --target-group-arn {{item.target_group_arn}} - --targets{%for target in item.targets%} - Id={{target.target_id}}, - Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}} - {%endif%} - {%endfor%} - loop: "{{target_info.instance_target_groups}}" - +- name: "reregistration commands: ELBv2s" + ansible.builtin.debug: + msg: > + aws --region {{ansible_ec2_placement_region}} elbv2 + register-targets --target-group-arn {{item.target_group_arn}} + --targets{%for target in item.targets%} + Id={{target.target_id}}, + Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}} + {%endif%} + {%endfor%} + loop: "{{target_info.instance_target_groups}}" """ -RETURN = """ +RETURN = r""" instance_target_groups: description: a list of target groups to which the instance is registered to returned: always @@ -204,20 +203,23 @@ instance_target_groups: type: str """ -__metaclass__ = type - try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: # we can handle the lack of boto3 based on the ec2 module pass -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class Target(object): """Models a target in a target group""" + def __init__(self, target_id, port, az, raw_target_health): self.target_port = port self.target_id = target_id @@ -238,10 +240,7 @@ class TargetGroup(object): self.targets = [] def add_target(self, 
target_id, target_port, target_az, raw_target_health): - self.targets.append(Target(target_id, - target_port, - target_az, - raw_target_health)) + self.targets.append(Target(target_id, target_port, target_az, raw_target_health)) def to_dict(self): object_dict = vars(self) @@ -253,28 +252,17 @@ class TargetGroup(object): class TargetInfoGatherer(object): - def __init__(self, module, instance_id, get_unused_target_groups): self.module = module try: - self.ec2 = self.module.client( - "ec2", - retry_decorator=AWSRetry.jittered_backoff(retries=10) - ) + self.ec2 = self.module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, - msg="Couldn't connect to ec2" - ) + self.module.fail_json_aws(e, msg="Couldn't connect to ec2") try: - self.elbv2 = self.module.client( - "elbv2", - retry_decorator=AWSRetry.jittered_backoff(retries=10) - ) + self.elbv2 = self.module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, - msg="Could not connect to elbv2" - ) + self.module.fail_json_aws(e, msg="Could not connect to elbv2") self.instance_id = instance_id self.get_unused_target_groups = get_unused_target_groups @@ -282,25 +270,19 @@ class TargetInfoGatherer(object): def _get_instance_ips(self): """Fetch all IPs associated with this instance so that we can determine - whether or not an instance is in an IP-based target group""" + whether or not an instance is in an IP-based target group""" try: # get ahold of the instance in the API - reservations = self.ec2.describe_instances( - InstanceIds=[self.instance_id], - aws_retry=True - )["Reservations"] + reservations = self.ec2.describe_instances(InstanceIds=[self.instance_id], aws_retry=True)["Reservations"] except (BotoCoreError, ClientError) as e: # typically this will happen if the instance doesn't exist - self.module.fail_json_aws(e, - msg="Could not get instance info" + - " for instance '%s'" % - (self.instance_id) - ) + self.module.fail_json_aws( + e, + msg=f"Could not get instance info for instance '{self.instance_id}'", + ) if len(reservations) < 1: - self.module.fail_json( - msg="Instance ID %s could not be found" % self.instance_id - ) + self.module.fail_json(msg=f"Instance ID {self.instance_id} could not be found") instance = reservations[0]["Instances"][0] @@ -317,38 +299,36 @@ class TargetInfoGatherer(object): def _get_target_group_objects(self): """helper function to build a list of TargetGroup objects based on - the AWS API""" + the AWS API""" try: - paginator = self.elbv2.get_paginator( - "describe_target_groups" - ) + paginator = self.elbv2.get_paginator("describe_target_groups") tg_response = paginator.paginate().build_full_result() except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, - msg="Could not describe target" + - " groups" - ) + self.module.fail_json_aws( + e, + msg="Could not describe target groups", + ) # build list of TargetGroup objects representing every target group in # the system target_groups = [] for each_tg in tg_response["TargetGroups"]: - if not self.get_unused_target_groups and \ - len(each_tg["LoadBalancerArns"]) < 1: + if not self.get_unused_target_groups and len(each_tg["LoadBalancerArns"]) < 1: # only collect target groups that actually are connected # to LBs continue target_groups.append( - TargetGroup(target_group_arn=each_tg["TargetGroupArn"], - target_group_type=each_tg["TargetType"], - ) + TargetGroup( + 
target_group_arn=each_tg["TargetGroupArn"], + target_group_type=each_tg["TargetType"], + ) ) return target_groups def _get_target_descriptions(self, target_groups): """Helper function to build a list of all the target descriptions - for this target in a target group""" + for this target in a target group""" # Build a list of all the target groups pointing to this instance # based on the previous list tgs = set() @@ -356,37 +336,25 @@ class TargetInfoGatherer(object): for tg in target_groups: try: # Get the list of targets for that target group - response = self.elbv2.describe_target_health( - TargetGroupArn=tg.target_group_arn, - aws_retry=True - ) + response = self.elbv2.describe_target_health(TargetGroupArn=tg.target_group_arn, aws_retry=True) except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, - msg="Could not describe target " + - "health for target group %s" % - tg.target_group_arn - ) + self.module.fail_json_aws( + e, msg="Could not describe target " + f"health for target group {tg.target_group_arn}" + ) for t in response["TargetHealthDescriptions"]: # If the target group has this instance as a target, add to # list. This logic also accounts for the possibility of a # target being in the target group multiple times with # overridden ports - if t["Target"]["Id"] == self.instance_id or \ - t["Target"]["Id"] in self.instance_ips: - + if t["Target"]["Id"] == self.instance_id or t["Target"]["Id"] in self.instance_ips: # The 'AvailabilityZone' parameter is a weird one, see the # API docs for more. Basically it's only supposed to be # there under very specific circumstances, so we need # to account for that - az = t["Target"]["AvailabilityZone"] \ - if "AvailabilityZone" in t["Target"] \ - else None - - tg.add_target(t["Target"]["Id"], - t["Target"]["Port"], - az, - t["TargetHealth"]) + az = t["Target"]["AvailabilityZone"] if "AvailabilityZone" in t["Target"] else None + + tg.add_target(t["Target"]["Id"], t["Target"]["Port"], az, t["TargetHealth"]) # since tgs is a set, each target group will be added only # once, even though we call add on each successful match tgs.add(tg) @@ -404,8 +372,7 @@ class TargetInfoGatherer(object): def main(): argument_spec = dict( instance_id={"required": True, "type": "str"}, - get_unused_target_groups={"required": False, - "default": True, "type": "bool"} + get_unused_target_groups={"required": False, "default": True, "type": "bool"}, ) module = AnsibleAWSModule( @@ -416,10 +383,7 @@ def main(): instance_id = module.params["instance_id"] get_unused_target_groups = module.params["get_unused_target_groups"] - tg_gatherer = TargetInfoGatherer(module, - instance_id, - get_unused_target_groups - ) + tg_gatherer = TargetInfoGatherer(module, instance_id, get_unused_target_groups) instance_target_groups = [each.to_dict() for each in tg_gatherer.tgs] diff --git a/ansible_collections/community/aws/plugins/modules/glue_connection.py b/ansible_collections/community/aws/plugins/modules/glue_connection.py index bcfacb171..18039a861 100644 --- a/ansible_collections/community/aws/plugins/modules/glue_connection.py +++ b/ansible_collections/community/aws/plugins/modules/glue_connection.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Rob White (@wimnat) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: glue_connection 
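Editor's note, not part of the patch: _get_glue_connection() further down in this file treats a missing connection as a normal outcome rather than an error, catching the service's EntityNotFoundException (via is_boto3_error_code) and returning None. The same "fetch or None" idiom against a plain boto3 Glue client, as an illustrative sketch; the client construction and connection name are hypothetical:

import boto3

def get_glue_connection_or_none(glue, name):
    # Absence is an expected outcome here: return None instead of raising.
    try:
        return glue.get_connection(Name=name)["Connection"]
    except glue.exceptions.EntityNotFoundException:
        return None

# glue = boto3.client("glue")  # hypothetical client
# conn = get_glue_connection_or_none(glue, "my-glue-connection")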
version_added: 1.0.0 @@ -72,12 +70,12 @@ options: - Required when I(connection_type=NETWORK). type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an AWS Glue connection @@ -106,9 +104,9 @@ EXAMPLES = r''' - community.aws.glue_connection: name: my-glue-connection state: absent -''' +""" -RETURN = r''' +RETURN = r""" connection_properties: description: - (deprecated) A dict of key-value pairs (converted to lowercase) used as parameters for this connection. @@ -157,11 +155,11 @@ raw_connection_properties: returned: when state is present type: dict sample: {'JDBC_CONNECTION_URL':'jdbc:mysql://mydb:3306/databasename','USERNAME':'x','PASSWORD':'y'} -''' +""" -# Non-ansible imports import copy import time + try: import botocore except ImportError: @@ -169,10 +167,11 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def _get_glue_connection(connection, module): @@ -187,13 +186,13 @@ def _get_glue_connection(connection, module): connection_name = module.params.get("name") connection_catalog_id = module.params.get("catalog_id") - params = {'Name': connection_name} + params = {"Name": connection_name} if connection_catalog_id is not None: - params['CatalogId'] = connection_catalog_id + params["CatalogId"] = connection_catalog_id try: - return connection.get_connection(aws_retry=True, **params)['Connection'] - except is_boto3_error_code('EntityNotFoundException'): + return connection.get_connection(aws_retry=True, **params)["Connection"] + except is_boto3_error_code("EntityNotFoundException"): return None @@ -209,37 +208,50 @@ def _compare_glue_connection_params(user_params, current_params): # Weirdly, boto3 doesn't return some keys if the value is empty e.g. 
Description # To counter this, add the key if it's missing with a blank value - if 'Description' not in current_params: - current_params['Description'] = "" - if 'MatchCriteria' not in current_params: - current_params['MatchCriteria'] = list() - if 'PhysicalConnectionRequirements' not in current_params: - current_params['PhysicalConnectionRequirements'] = dict() - current_params['PhysicalConnectionRequirements']['SecurityGroupIdList'] = [] - current_params['PhysicalConnectionRequirements']['SubnetId'] = "" - - if 'ConnectionProperties' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionProperties'] \ - != current_params['ConnectionProperties']: + if "Description" not in current_params: + current_params["Description"] = "" + if "MatchCriteria" not in current_params: + current_params["MatchCriteria"] = list() + if "PhysicalConnectionRequirements" not in current_params: + current_params["PhysicalConnectionRequirements"] = dict() + current_params["PhysicalConnectionRequirements"]["SecurityGroupIdList"] = [] + current_params["PhysicalConnectionRequirements"]["SubnetId"] = "" + + if ( + "ConnectionProperties" in user_params["ConnectionInput"] + and user_params["ConnectionInput"]["ConnectionProperties"] != current_params["ConnectionProperties"] + ): return True - if 'ConnectionType' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionType'] \ - != current_params['ConnectionType']: + if ( + "ConnectionType" in user_params["ConnectionInput"] + and user_params["ConnectionInput"]["ConnectionType"] != current_params["ConnectionType"] + ): return True - if 'Description' in user_params['ConnectionInput'] and user_params['ConnectionInput']['Description'] != current_params['Description']: + if ( + "Description" in user_params["ConnectionInput"] + and user_params["ConnectionInput"]["Description"] != current_params["Description"] + ): return True - if 'MatchCriteria' in user_params['ConnectionInput'] and set(user_params['ConnectionInput']['MatchCriteria']) != set(current_params['MatchCriteria']): + if "MatchCriteria" in user_params["ConnectionInput"] and set( + user_params["ConnectionInput"]["MatchCriteria"] + ) != set(current_params["MatchCriteria"]): return True - if 'PhysicalConnectionRequirements' in user_params['ConnectionInput']: - if 'SecurityGroupIdList' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ - set(user_params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList']) \ - != set(current_params['PhysicalConnectionRequirements']['SecurityGroupIdList']): + if "PhysicalConnectionRequirements" in user_params["ConnectionInput"]: + if "SecurityGroupIdList" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] and set( + user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["SecurityGroupIdList"] + ) != set(current_params["PhysicalConnectionRequirements"]["SecurityGroupIdList"]): return True - if 'SubnetId' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ - user_params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] \ - != current_params['PhysicalConnectionRequirements']['SubnetId']: + if ( + "SubnetId" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] + and user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["SubnetId"] + != current_params["PhysicalConnectionRequirements"]["SubnetId"] + ): return True - if 'AvailabilityZone' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ - 
user_params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] \ - != current_params['PhysicalConnectionRequirements']['AvailabilityZone']: + if ( + "AvailabilityZone" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] + and user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["AvailabilityZone"] + != current_params["PhysicalConnectionRequirements"]["AvailabilityZone"] + ): return True return False @@ -253,11 +265,11 @@ def _await_glue_connection(connection, module): while wait_timeout > time.time(): glue_connection = _get_glue_connection(connection, module) - if glue_connection and glue_connection.get('Name'): + if glue_connection and glue_connection.get("Name"): return glue_connection time.sleep(check_interval) - module.fail_json(msg='Timeout waiting for Glue connection %s' % module.params.get('name')) + module.fail_json(msg=f"Timeout waiting for Glue connection {module.params.get('name')}") def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection): @@ -272,26 +284,30 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co changed = False params = dict() - params['ConnectionInput'] = dict() - params['ConnectionInput']['Name'] = module.params.get("name") - params['ConnectionInput']['ConnectionType'] = module.params.get("connection_type") - params['ConnectionInput']['ConnectionProperties'] = module.params.get("connection_properties") + params["ConnectionInput"] = dict() + params["ConnectionInput"]["Name"] = module.params.get("name") + params["ConnectionInput"]["ConnectionType"] = module.params.get("connection_type") + params["ConnectionInput"]["ConnectionProperties"] = module.params.get("connection_properties") if module.params.get("catalog_id") is not None: - params['CatalogId'] = module.params.get("catalog_id") + params["CatalogId"] = module.params.get("catalog_id") if module.params.get("description") is not None: - params['ConnectionInput']['Description'] = module.params.get("description") + params["ConnectionInput"]["Description"] = module.params.get("description") if module.params.get("match_criteria") is not None: - params['ConnectionInput']['MatchCriteria'] = module.params.get("match_criteria") + params["ConnectionInput"]["MatchCriteria"] = module.params.get("match_criteria") if module.params.get("security_groups") is not None or module.params.get("subnet_id") is not None: - params['ConnectionInput']['PhysicalConnectionRequirements'] = dict() + params["ConnectionInput"]["PhysicalConnectionRequirements"] = dict() if module.params.get("security_groups") is not None: # Get security group IDs from names - security_group_ids = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection_ec2, boto3=True) - params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList'] = security_group_ids + security_group_ids = get_ec2_security_group_ids_from_names( + module.params.get("security_groups"), connection_ec2, boto3=True + ) + params["ConnectionInput"]["PhysicalConnectionRequirements"]["SecurityGroupIdList"] = security_group_ids if module.params.get("subnet_id") is not None: - params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] = module.params.get("subnet_id") + params["ConnectionInput"]["PhysicalConnectionRequirements"]["SubnetId"] = module.params.get("subnet_id") if module.params.get("availability_zone") is not None: - params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] = 
module.params.get("availability_zone") + params["ConnectionInput"]["PhysicalConnectionRequirements"]["AvailabilityZone"] = module.params.get( + "availability_zone" + ) # If glue_connection is not None then check if it needs to be modified, else create it if glue_connection: @@ -299,7 +315,7 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co try: # We need to slightly modify the params for an update update_params = copy.deepcopy(params) - update_params['Name'] = update_params['ConnectionInput']['Name'] + update_params["Name"] = update_params["ConnectionInput"]["Name"] if not module.check_mode: connection.update_connection(aws_retry=True, **update_params) changed = True @@ -318,12 +334,19 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co glue_connection = _await_glue_connection(connection, module) if glue_connection: - module.deprecate("The 'connection_properties' return key is deprecated and will be replaced" - " by 'raw_connection_properties'. Both values are returned for now.", - date='2024-06-01', collection_name='community.aws') - glue_connection['RawConnectionProperties'] = glue_connection['ConnectionProperties'] + module.deprecate( + ( + "The 'connection_properties' return key is deprecated and will be replaced" + " by 'raw_connection_properties'. Both values are returned for now." + ), + date="2024-06-01", + collection_name="community.aws", + ) + glue_connection["RawConnectionProperties"] = glue_connection["ConnectionProperties"] - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection or {}, ignore_list=['RawConnectionProperties'])) + module.exit_json( + changed=changed, **camel_dict_to_snake_dict(glue_connection or {}, ignore_list=["RawConnectionProperties"]) + ) def delete_glue_connection(connection, module, glue_connection): @@ -337,9 +360,9 @@ def delete_glue_connection(connection, module, glue_connection): """ changed = False - params = {'ConnectionName': module.params.get("name")} + params = {"ConnectionName": module.params.get("name")} if module.params.get("catalog_id") is not None: - params['CatalogId'] = module.params.get("catalog_id") + params["CatalogId"] = module.params.get("catalog_id") if glue_connection: try: @@ -353,41 +376,41 @@ def delete_glue_connection(connection, module, glue_connection): def main(): - - argument_spec = ( - dict( - availability_zone=dict(type='str'), - catalog_id=dict(type='str'), - connection_properties=dict(type='dict'), - connection_type=dict(type='str', default='JDBC', choices=['CUSTOM', 'JDBC', 'KAFKA', 'MARKETPLACE', 'MONGODB', 'NETWORK']), - description=dict(type='str'), - match_criteria=dict(type='list', elements='str'), - name=dict(required=True, type='str'), - security_groups=dict(type='list', elements='str'), - state=dict(required=True, choices=['present', 'absent'], type='str'), - subnet_id=dict(type='str') - ) + argument_spec = dict( + availability_zone=dict(type="str"), + catalog_id=dict(type="str"), + connection_properties=dict(type="dict"), + connection_type=dict( + type="str", default="JDBC", choices=["CUSTOM", "JDBC", "KAFKA", "MARKETPLACE", "MONGODB", "NETWORK"] + ), + description=dict(type="str"), + match_criteria=dict(type="list", elements="str"), + name=dict(required=True, type="str"), + security_groups=dict(type="list", elements="str"), + state=dict(required=True, choices=["present", "absent"], type="str"), + subnet_id=dict(type="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', 
['connection_properties']), - ('connection_type', 'NETWORK', ['availability_zone', 'security_groups', 'subnet_id']) - ], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[ + ("state", "present", ["connection_properties"]), + ("connection_type", "NETWORK", ["availability_zone", "security_groups", "subnet_id"]), + ], + supports_check_mode=True, + ) retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection_glue = module.client('glue', retry_decorator=retry_decorator) - connection_ec2 = module.client('ec2', retry_decorator=retry_decorator) + connection_glue = module.client("glue", retry_decorator=retry_decorator) + connection_ec2 = module.client("ec2", retry_decorator=retry_decorator) glue_connection = _get_glue_connection(connection_glue, module) - if module.params.get("state") == 'present': + if module.params.get("state") == "present": create_or_update_glue_connection(connection_glue, connection_ec2, module, glue_connection) else: delete_glue_connection(connection_glue, module, glue_connection) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/glue_crawler.py b/ansible_collections/community/aws/plugins/modules/glue_crawler.py index a47b8eb3f..5d92219df 100644 --- a/ansible_collections/community/aws/plugins/modules/glue_crawler.py +++ b/ansible_collections/community/aws/plugins/modules/glue_crawler.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Rob White (@wimnat) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: glue_crawler version_added: 4.1.0 @@ -77,13 +75,13 @@ options: - Required when I(state=present). type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an AWS Glue crawler @@ -109,9 +107,9 @@ EXAMPLES = r''' - community.aws.glue_crawler: name: my-glue-crawler state: absent -''' +""" -RETURN = r''' +RETURN = r""" creation_time: description: The time and date that this crawler definition was created. returned: when state is present @@ -198,7 +196,7 @@ targets: description: List of catalog targets. 
returned: when state is present type: list -''' +""" try: import botocore @@ -208,22 +206,26 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def _get_glue_crawler(connection, module, glue_crawler_name): - ''' + """ Get an AWS Glue crawler based on name. If not found, return None. - ''' + """ try: - return connection.get_crawler(aws_retry=True, Name=glue_crawler_name)['Crawler'] - except is_boto3_error_code('EntityNotFoundException'): + return connection.get_crawler(aws_retry=True, Name=glue_crawler_name)["Crawler"] + except is_boto3_error_code("EntityNotFoundException"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) @@ -239,39 +241,58 @@ def _trim_target(target): if not target: return None retval = target.copy() - if not retval.get('Exclusions', None): - retval.pop('Exclusions', None) + if not retval.get("Exclusions", None): + retval.pop("Exclusions", None) return retval def _compare_glue_crawler_params(user_params, current_params): - ''' + """ Compare Glue crawler params. 
If there is a difference, return True immediately else return False - ''' - if 'DatabaseName' in user_params and user_params['DatabaseName'] != current_params['DatabaseName']: + """ + if "DatabaseName" in user_params and user_params["DatabaseName"] != current_params["DatabaseName"]: return True - if 'Description' in user_params and user_params['Description'] != current_params['Description']: + if "Description" in user_params and user_params["Description"] != current_params["Description"]: return True - if 'RecrawlPolicy' in user_params and user_params['RecrawlPolicy'] != current_params['RecrawlPolicy']: + if "RecrawlPolicy" in user_params and user_params["RecrawlPolicy"] != current_params["RecrawlPolicy"]: return True - if 'Role' in user_params and user_params['Role'] != current_params['Role']: + if "Role" in user_params and user_params["Role"] != current_params["Role"]: return True - if 'SchemaChangePolicy' in user_params and user_params['SchemaChangePolicy'] != current_params['SchemaChangePolicy']: + if ( + "SchemaChangePolicy" in user_params + and user_params["SchemaChangePolicy"] != current_params["SchemaChangePolicy"] + ): return True - if 'TablePrefix' in user_params and user_params['TablePrefix'] != current_params['TablePrefix']: + if "TablePrefix" in user_params and user_params["TablePrefix"] != current_params["TablePrefix"]: return True - if 'Targets' in user_params: - if 'S3Targets' in user_params['Targets']: - if _trim_targets(user_params['Targets']['S3Targets']) != _trim_targets(current_params['Targets']['S3Targets']): + if "Targets" in user_params: + if "S3Targets" in user_params["Targets"]: + if _trim_targets(user_params["Targets"]["S3Targets"]) != _trim_targets( + current_params["Targets"]["S3Targets"] + ): return True - if 'JdbcTargets' in user_params['Targets'] and user_params['Targets']['JdbcTargets'] != current_params['Targets']['JdbcTargets']: - if _trim_targets(user_params['Targets']['JdbcTargets']) != _trim_targets(current_params['Targets']['JdbcTargets']): + if ( + "JdbcTargets" in user_params["Targets"] + and user_params["Targets"]["JdbcTargets"] != current_params["Targets"]["JdbcTargets"] + ): + if _trim_targets(user_params["Targets"]["JdbcTargets"]) != _trim_targets( + current_params["Targets"]["JdbcTargets"] + ): return True - if 'MongoDBTargets' in user_params['Targets'] and user_params['Targets']['MongoDBTargets'] != current_params['Targets']['MongoDBTargets']: + if ( + "MongoDBTargets" in user_params["Targets"] + and user_params["Targets"]["MongoDBTargets"] != current_params["Targets"]["MongoDBTargets"] + ): return True - if 'DynamoDBTargets' in user_params['Targets'] and user_params['Targets']['DynamoDBTargets'] != current_params['Targets']['DynamoDBTargets']: + if ( + "DynamoDBTargets" in user_params["Targets"] + and user_params["Targets"]["DynamoDBTargets"] != current_params["Targets"]["DynamoDBTargets"] + ): return True - if 'CatalogTargets' in user_params['Targets'] and user_params['Targets']['CatalogTargets'] != current_params['Targets']['CatalogTargets']: + if ( + "CatalogTargets" in user_params["Targets"] + and user_params["Targets"]["CatalogTargets"] != current_params["Targets"]["CatalogTargets"] + ): return True return False @@ -280,21 +301,23 @@ def _compare_glue_crawler_params(user_params, current_params): def ensure_tags(connection, module, glue_crawler): changed = False - if module.params.get('tags') is None: + if module.params.get("tags") is None: return False account_id, partition = get_aws_account_info(module) - arn = 
'arn:{0}:glue:{1}:{2}:crawler/{3}'.format(partition, module.region, account_id, module.params.get('name')) + arn = f"arn:{partition}:glue:{module.region}:{account_id}:crawler/{module.params.get('name')}" try: - existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {}) + existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: if module.check_mode: existing_tags = {} else: - module.fail_json_aws(e, msg='Unable to get tags for Glue crawler %s' % module.params.get('name')) + module.fail_json_aws(e, msg=f"Unable to get tags for Glue crawler {module.params.get('name')}") - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags( + existing_tags, module.params.get("tags"), module.params.get("purge_tags") + ) if tags_to_remove: changed = True @@ -302,7 +325,7 @@ def ensure_tags(connection, module, glue_crawler): try: connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name')) + module.fail_json_aws(e, msg=f"Unable to set tags for Glue crawler {module.params.get('name')}") if tags_to_add: changed = True @@ -310,35 +333,37 @@ def ensure_tags(connection, module, glue_crawler): try: connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name')) + module.fail_json_aws(e, msg=f"Unable to set tags for Glue crawler {module.params.get('name')}") return changed def create_or_update_glue_crawler(connection, module, glue_crawler): - ''' + """ Create or update an AWS Glue crawler - ''' + """ changed = False params = dict() - params['Name'] = module.params.get('name') - params['Role'] = module.params.get('role') - params['Targets'] = module.params.get('targets') - if module.params.get('database_name') is not None: - params['DatabaseName'] = module.params.get('database_name') - if module.params.get('description') is not None: - params['Description'] = module.params.get('description') - if module.params.get('recrawl_policy') is not None: - params['RecrawlPolicy'] = snake_dict_to_camel_dict(module.params.get('recrawl_policy'), capitalize_first=True) - if module.params.get('role') is not None: - params['Role'] = module.params.get('role') - if module.params.get('schema_change_policy') is not None: - params['SchemaChangePolicy'] = snake_dict_to_camel_dict(module.params.get('schema_change_policy'), capitalize_first=True) - if module.params.get('table_prefix') is not None: - params['TablePrefix'] = module.params.get('table_prefix') - if module.params.get('targets') is not None: - params['Targets'] = module.params.get('targets') + params["Name"] = module.params.get("name") + params["Role"] = module.params.get("role") + params["Targets"] = module.params.get("targets") + if module.params.get("database_name") is not None: + params["DatabaseName"] = module.params.get("database_name") + if module.params.get("description") is not None: + params["Description"] = module.params.get("description") + if module.params.get("recrawl_policy") is not None: + params["RecrawlPolicy"] = 
snake_dict_to_camel_dict(module.params.get("recrawl_policy"), capitalize_first=True) + if module.params.get("role") is not None: + params["Role"] = module.params.get("role") + if module.params.get("schema_change_policy") is not None: + params["SchemaChangePolicy"] = snake_dict_to_camel_dict( + module.params.get("schema_change_policy"), capitalize_first=True + ) + if module.params.get("table_prefix") is not None: + params["TablePrefix"] = module.params.get("table_prefix") + if module.params.get("targets") is not None: + params["Targets"] = module.params.get("targets") if glue_crawler: if _compare_glue_crawler_params(params, glue_crawler): @@ -356,23 +381,26 @@ def create_or_update_glue_crawler(connection, module, glue_crawler): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) - glue_crawler = _get_glue_crawler(connection, module, params['Name']) + glue_crawler = _get_glue_crawler(connection, module, params["Name"]) changed |= ensure_tags(connection, module, glue_crawler) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_crawler or {}, ignore_list=['SchemaChangePolicy', 'RecrawlPolicy', 'Targets'])) + module.exit_json( + changed=changed, + **camel_dict_to_snake_dict(glue_crawler or {}, ignore_list=["SchemaChangePolicy", "RecrawlPolicy", "Targets"]), + ) def delete_glue_crawler(connection, module, glue_crawler): - ''' + """ Delete an AWS Glue crawler - ''' + """ changed = False if glue_crawler: try: if not module.check_mode: - connection.delete_crawler(aws_retry=True, Name=glue_crawler['Name']) + connection.delete_crawler(aws_retry=True, Name=glue_crawler["Name"]) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) @@ -381,46 +409,39 @@ def delete_glue_crawler(connection, module, glue_crawler): def main(): - - argument_spec = ( - dict( - database_name=dict(type='str'), - description=dict(type='str'), - name=dict(required=True, type='str'), - purge_tags=dict(type='bool', default=True), - recrawl_policy=dict(type='dict', options=dict( - recrawl_behavior=dict(type='str') - )), - role=dict(type='str'), - schema_change_policy=dict(type='dict', options=dict( - delete_behavior=dict(type='str'), - update_behavior=dict(type='str') - )), - state=dict(required=True, choices=['present', 'absent'], type='str'), - table_prefix=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - targets=dict(type='dict') - ) + argument_spec = dict( + database_name=dict(type="str"), + description=dict(type="str"), + name=dict(required=True, type="str"), + purge_tags=dict(type="bool", default=True), + recrawl_policy=dict(type="dict", options=dict(recrawl_behavior=dict(type="str"))), + role=dict(type="str"), + schema_change_policy=dict( + type="dict", options=dict(delete_behavior=dict(type="str"), update_behavior=dict(type="str")) + ), + state=dict(required=True, choices=["present", "absent"], type="str"), + table_prefix=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + targets=dict(type="dict"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['role', 'targets']) - ], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["role", "targets"])], + supports_check_mode=True, + ) - connection = module.client('glue', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = module.client("glue", 
retry_decorator=AWSRetry.jittered_backoff(retries=10)) - state = module.params.get('state') + state = module.params.get("state") - glue_crawler = _get_glue_crawler(connection, module, module.params.get('name')) + glue_crawler = _get_glue_crawler(connection, module, module.params.get("name")) - if state == 'present': + if state == "present": create_or_update_glue_crawler(connection, module, glue_crawler) else: delete_glue_crawler(connection, module, glue_crawler) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/glue_job.py b/ansible_collections/community/aws/plugins/modules/glue_job.py index 47d6156d7..256779975 100644 --- a/ansible_collections/community/aws/plugins/modules/glue_job.py +++ b/ansible_collections/community/aws/plugins/modules/glue_job.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Rob White (@wimnat) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: glue_job version_added: 1.0.0 @@ -103,13 +101,13 @@ options: notes: - Support for I(tags) and I(purge_tags) was added in release 2.2.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an AWS Glue job @@ -126,9 +124,9 @@ EXAMPLES = r''' - community.aws.glue_job: name: my-glue-job state: absent -''' +""" -RETURN = r''' +RETURN = r""" allocated_capacity: description: The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to 100 DPUs can be allocated; the default is 10. 
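Aside on the tagging hunks: the ensure_tags() functions in both Glue modules follow the same reconcile pattern — read the tags currently on the resource ARN, diff them against the requested set with compare_aws_tags(), and issue untag_resource()/tag_resource() calls only for the delta. A minimal standalone sketch of that flow, assuming a boto3 Glue client; reconcile_glue_tags is an illustrative name, not part of either module:

from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags


def reconcile_glue_tags(connection, arn, desired_tags, purge_tags=True):
    # Tags currently attached to the crawler/job ARN.
    existing = connection.get_tags(ResourceArn=arn).get("Tags", {})
    # compare_aws_tags() returns the tags to set and the tag keys to unset.
    tags_to_add, tags_to_remove = compare_aws_tags(existing, desired_tags, purge_tags)
    if tags_to_remove:
        connection.untag_resource(ResourceArn=arn, TagsToRemove=tags_to_remove)
    if tags_to_add:
        connection.tag_resource(ResourceArn=arn, TagsToAdd=tags_to_add)
    # Mirrors the modules' changed flag: True only when a call was actually made.
    return bool(tags_to_add or tags_to_remove)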
A DPU is a relative measure of processing power @@ -223,10 +221,10 @@ timeout: returned: when state is present type: int sample: 300 -''' +""" -# Non-ansible imports import copy + try: import botocore except ImportError: @@ -234,11 +232,12 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def _get_glue_job(connection, module, glue_job_name): @@ -251,10 +250,13 @@ def _get_glue_job(connection, module, glue_job_name): :return: boto3 Glue job dict or None if not found """ try: - return connection.get_job(aws_retry=True, JobName=glue_job_name)['Job'] - except is_boto3_error_code('EntityNotFoundException'): + return connection.get_job(aws_retry=True, JobName=glue_job_name)["Job"] + except is_boto3_error_code("EntityNotFoundException"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) @@ -269,39 +271,43 @@ def _compare_glue_job_params(user_params, current_params): # Weirdly, boto3 doesn't return some keys if the value is empty e.g. 
Description # To counter this, add the key if it's missing with a blank value - if 'Description' not in current_params: - current_params['Description'] = "" - if 'DefaultArguments' not in current_params: - current_params['DefaultArguments'] = dict() + if "Description" not in current_params: + current_params["Description"] = "" + if "DefaultArguments" not in current_params: + current_params["DefaultArguments"] = dict() - if 'AllocatedCapacity' in user_params and user_params['AllocatedCapacity'] != current_params['AllocatedCapacity']: + if "AllocatedCapacity" in user_params and user_params["AllocatedCapacity"] != current_params["AllocatedCapacity"]: return True - if 'Command' in user_params: - if user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']: + if "Command" in user_params: + if user_params["Command"]["ScriptLocation"] != current_params["Command"]["ScriptLocation"]: return True - if user_params['Command']['PythonVersion'] != current_params['Command']['PythonVersion']: + if user_params["Command"]["PythonVersion"] != current_params["Command"]["PythonVersion"]: return True - if 'Connections' in user_params and user_params['Connections'] != current_params['Connections']: + if "Connections" in user_params and user_params["Connections"] != current_params["Connections"]: return True - if 'DefaultArguments' in user_params and user_params['DefaultArguments'] != current_params['DefaultArguments']: + if "DefaultArguments" in user_params and user_params["DefaultArguments"] != current_params["DefaultArguments"]: return True - if 'Description' in user_params and user_params['Description'] != current_params['Description']: + if "Description" in user_params and user_params["Description"] != current_params["Description"]: return True - if 'ExecutionProperty' in user_params and user_params['ExecutionProperty']['MaxConcurrentRuns'] != current_params['ExecutionProperty']['MaxConcurrentRuns']: + if ( + "ExecutionProperty" in user_params + and user_params["ExecutionProperty"]["MaxConcurrentRuns"] + != current_params["ExecutionProperty"]["MaxConcurrentRuns"] + ): return True - if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']: + if "GlueVersion" in user_params and user_params["GlueVersion"] != current_params["GlueVersion"]: return True - if 'MaxRetries' in user_params and user_params['MaxRetries'] != current_params['MaxRetries']: + if "MaxRetries" in user_params and user_params["MaxRetries"] != current_params["MaxRetries"]: return True - if 'Role' in user_params and user_params['Role'] != current_params['Role']: + if "Role" in user_params and user_params["Role"] != current_params["Role"]: return True - if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']: + if "Timeout" in user_params and user_params["Timeout"] != current_params["Timeout"]: return True - if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']: + if "GlueVersion" in user_params and user_params["GlueVersion"] != current_params["GlueVersion"]: return True - if 'WorkerType' in user_params and user_params['WorkerType'] != current_params['WorkerType']: + if "WorkerType" in user_params and user_params["WorkerType"] != current_params["WorkerType"]: return True - if 'NumberOfWorkers' in user_params and user_params['NumberOfWorkers'] != current_params['NumberOfWorkers']: + if "NumberOfWorkers" in user_params and user_params["NumberOfWorkers"] != current_params["NumberOfWorkers"]: return True return 
False @@ -310,21 +316,23 @@ def _compare_glue_job_params(user_params, current_params): def ensure_tags(connection, module, glue_job): changed = False - if module.params.get('tags') is None: + if module.params.get("tags") is None: return False account_id, partition = get_aws_account_info(module) - arn = 'arn:{0}:glue:{1}:{2}:job/{3}'.format(partition, module.region, account_id, module.params.get('name')) + arn = f"arn:{partition}:glue:{module.region}:{account_id}:job/{module.params.get('name')}" try: - existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {}) + existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: if module.check_mode: existing_tags = {} else: - module.fail_json_aws(e, msg='Unable to get tags for Glue job %s' % module.params.get('name')) + module.fail_json_aws(e, msg=f"Unable to get tags for Glue job {module.params.get('name')}") - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags( + existing_tags, module.params.get("tags"), module.params.get("purge_tags") + ) if tags_to_remove: changed = True @@ -332,7 +340,7 @@ def ensure_tags(connection, module, glue_job): try: connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Glue job %s' % module.params.get('name')) + module.fail_json_aws(e, msg=f"Unable to set tags for Glue job {module.params.get('name')}") if tags_to_add: changed = True @@ -340,7 +348,7 @@ def ensure_tags(connection, module, glue_job): try: connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Glue job %s' % module.params.get('name')) + module.fail_json_aws(e, msg=f"Unable to set tags for Glue job {module.params.get('name')}") return changed @@ -357,42 +365,45 @@ def create_or_update_glue_job(connection, module, glue_job): changed = False params = dict() - params['Name'] = module.params.get("name") - params['Role'] = module.params.get("role") + params["Name"] = module.params.get("name") + params["Role"] = module.params.get("role") if module.params.get("allocated_capacity") is not None: - params['AllocatedCapacity'] = module.params.get("allocated_capacity") + params["AllocatedCapacity"] = module.params.get("allocated_capacity") if module.params.get("command_script_location") is not None: - params['Command'] = {'Name': module.params.get("command_name"), 'ScriptLocation': module.params.get("command_script_location")} + params["Command"] = { + "Name": module.params.get("command_name"), + "ScriptLocation": module.params.get("command_script_location"), + } if module.params.get("command_python_version") is not None: - params['Command']['PythonVersion'] = module.params.get("command_python_version") + params["Command"]["PythonVersion"] = module.params.get("command_python_version") if module.params.get("connections") is not None: - params['Connections'] = {'Connections': module.params.get("connections")} + params["Connections"] = {"Connections": module.params.get("connections")} if module.params.get("default_arguments") is not None: - params['DefaultArguments'] = 
module.params.get("default_arguments") + params["DefaultArguments"] = module.params.get("default_arguments") if module.params.get("description") is not None: - params['Description'] = module.params.get("description") + params["Description"] = module.params.get("description") if module.params.get("glue_version") is not None: - params['GlueVersion'] = module.params.get("glue_version") + params["GlueVersion"] = module.params.get("glue_version") if module.params.get("max_concurrent_runs") is not None: - params['ExecutionProperty'] = {'MaxConcurrentRuns': module.params.get("max_concurrent_runs")} + params["ExecutionProperty"] = {"MaxConcurrentRuns": module.params.get("max_concurrent_runs")} if module.params.get("max_retries") is not None: - params['MaxRetries'] = module.params.get("max_retries") + params["MaxRetries"] = module.params.get("max_retries") if module.params.get("timeout") is not None: - params['Timeout'] = module.params.get("timeout") + params["Timeout"] = module.params.get("timeout") if module.params.get("glue_version") is not None: - params['GlueVersion'] = module.params.get("glue_version") + params["GlueVersion"] = module.params.get("glue_version") if module.params.get("worker_type") is not None: - params['WorkerType'] = module.params.get("worker_type") + params["WorkerType"] = module.params.get("worker_type") if module.params.get("number_of_workers") is not None: - params['NumberOfWorkers'] = module.params.get("number_of_workers") + params["NumberOfWorkers"] = module.params.get("number_of_workers") # If glue_job is not None then check if it needs to be modified, else create it if glue_job: if _compare_glue_job_params(params, glue_job): try: # Update job needs slightly modified params - update_params = {'JobName': params['Name'], 'JobUpdate': copy.deepcopy(params)} - del update_params['JobUpdate']['Name'] + update_params = {"JobName": params["Name"], "JobUpdate": copy.deepcopy(params)} + del update_params["JobUpdate"]["Name"] if not module.check_mode: connection.update_job(aws_retry=True, **update_params) changed = True @@ -406,11 +417,11 @@ def create_or_update_glue_job(connection, module, glue_job): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) - glue_job = _get_glue_job(connection, module, params['Name']) + glue_job = _get_glue_job(connection, module, params["Name"]) changed |= ensure_tags(connection, module, glue_job) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job or {}, ignore_list=['DefaultArguments'])) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job or {}, ignore_list=["DefaultArguments"])) def delete_glue_job(connection, module, glue_job): @@ -427,7 +438,7 @@ def delete_glue_job(connection, module, glue_job): if glue_job: try: if not module.check_mode: - connection.delete_job(aws_retry=True, JobName=glue_job['Name']) + connection.delete_job(aws_retry=True, JobName=glue_job["Name"]) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) @@ -436,49 +447,45 @@ def delete_glue_job(connection, module, glue_job): def main(): - - argument_spec = ( - dict( - allocated_capacity=dict(type='int'), - command_name=dict(type='str', default='glueetl'), - command_python_version=dict(type='str'), - command_script_location=dict(type='str'), - connections=dict(type='list', elements='str'), - default_arguments=dict(type='dict'), - description=dict(type='str'), - glue_version=dict(type='str'), - 
max_concurrent_runs=dict(type='int'), - max_retries=dict(type='int'), - name=dict(required=True, type='str'), - number_of_workers=dict(type='int'), - purge_tags=dict(type='bool', default=True), - role=dict(type='str'), - state=dict(required=True, choices=['present', 'absent'], type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - timeout=dict(type='int'), - worker_type=dict(choices=['Standard', 'G.1X', 'G.2X'], type='str'), - ) + argument_spec = dict( + allocated_capacity=dict(type="int"), + command_name=dict(type="str", default="glueetl"), + command_python_version=dict(type="str"), + command_script_location=dict(type="str"), + connections=dict(type="list", elements="str"), + default_arguments=dict(type="dict"), + description=dict(type="str"), + glue_version=dict(type="str"), + max_concurrent_runs=dict(type="int"), + max_retries=dict(type="int"), + name=dict(required=True, type="str"), + number_of_workers=dict(type="int"), + purge_tags=dict(type="bool", default=True), + role=dict(type="str"), + state=dict(required=True, choices=["present", "absent"], type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + timeout=dict(type="int"), + worker_type=dict(choices=["Standard", "G.1X", "G.2X"], type="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['role', 'command_script_location']) - ], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["role", "command_script_location"])], + supports_check_mode=True, + ) retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection = module.client('glue', retry_decorator=retry_decorator) + connection = module.client("glue", retry_decorator=retry_decorator) state = module.params.get("state") glue_job = _get_glue_job(connection, module, module.params.get("name")) - if state == 'present': + if state == "present": create_or_update_glue_job(connection, module, glue_job) else: delete_glue_job(connection, module, glue_job) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/iam_access_key.py b/ansible_collections/community/aws/plugins/modules/iam_access_key.py deleted file mode 100644 index ad61b5b2a..000000000 --- a/ansible_collections/community/aws/plugins/modules/iam_access_key.py +++ /dev/null @@ -1,317 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2021 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: iam_access_key -version_added: 2.1.0 -short_description: Manage AWS IAM User access keys -description: - - Manage AWS IAM user access keys. -author: Mark Chappell (@tremble) -options: - user_name: - description: - - The name of the IAM User to which the key belongs. - required: true - type: str - aliases: ['username'] - id: - description: - - The ID of the access key. - - Required when I(state=absent). - - Mutually exclusive with I(rotate_keys). - required: false - type: str - state: - description: - - Create or remove the access key. - - When I(state=present) and I(id) is not defined a new key will be created. - required: false - type: str - default: 'present' - choices: [ 'present', 'absent' ] - active: - description: - - Whether the key should be enabled or disabled. - - Defaults to C(true) when creating a new key. 
- required: false - type: bool - aliases: ['enabled'] - rotate_keys: - description: - - When there are already 2 access keys attached to the IAM user the oldest - key will be removed and a new key created. - - Ignored if I(state=absent) - - Mutually exclusive with I(id). - required: false - type: bool - default: false - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Create a new access key - community.aws.iam_access_key: - user_name: example_user - state: present - -- name: Delete the access_key - community.aws.iam_access_key: - user_name: example_user - id: AKIA1EXAMPLE1EXAMPLE - state: absent -''' - -RETURN = r''' -access_key: - description: A dictionary containing all the access key information. - returned: When the key exists. - type: complex - contains: - access_key_id: - description: The ID for the access key. - returned: success - type: str - sample: AKIA1EXAMPLE1EXAMPLE - create_date: - description: The date and time, in ISO 8601 date-time format, when the access key was created. - returned: success - type: str - sample: "2021-10-09T13:25:42+00:00" - user_name: - description: The name of the IAM user to which the key is attached. - returned: success - type: str - sample: example_user - status: - description: - - The status of the key. - - C(Active) means it can be used. - - C(Inactive) means it can not be used. - returned: success - type: str - sample: Inactive -secret_access_key: - description: - - The secret access key. - - A secret access key is the equivalent of a password which can not be changed and as such should be considered sensitive data. - - Secret access keys can only be accessed at creation time. - returned: When a new key is created. - type: str - sample: example/Example+EXAMPLE+example/Example -deleted_access_key_id: - description: - - The access key deleted during rotation. - returned: When a key was deleted during the rotation of access keys - type: str - sample: AKIA1EXAMPLE1EXAMPLE -''' - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - - -def delete_access_key(access_keys, user, access_key_id): - if not access_key_id: - return False - - if access_key_id not in access_keys: - return False - - if module.check_mode: - return True - - try: - client.delete_access_key( - aws_retry=True, - UserName=user, - AccessKeyId=access_key_id, - ) - except is_boto3_error_code('NoSuchEntityException'): - # Generally occurs when race conditions have happened and someone - # deleted the key while we were checking to see if it existed. 
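# Aside (illustrative sketch, not part of the removed module): the except-clause
# above is the idempotent-delete idiom used throughout these modules -- one
# specific error code, caught via is_boto3_error_code(), means "already gone"
# and is not treated as a failure. The same idiom against a plain boto3 IAM
# client; delete_key_if_present is a hypothetical helper name:
from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code


def delete_key_if_present(client, user_name, key_id):
    try:
        client.delete_access_key(UserName=user_name, AccessKeyId=key_id)
    except is_boto3_error_code("NoSuchEntityException"):
        # Lost a race with another deleter; report "no change" instead of failing.
        return False
    return True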
- return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, msg='Failed to delete access key "{0}" for user "{1}"'.format(access_key_id, user) - ) - - return True - - -def update_access_key(access_keys, user, access_key_id, enabled): - if access_key_id not in access_keys: - module.fail_json( - msg='Access key "{0}" not found attached to User "{1}"'.format(access_key_id, user), - ) - - changes = dict() - access_key = access_keys.get(access_key_id) - - if enabled is not None: - desired_status = 'Active' if enabled else 'Inactive' - if access_key.get('status') != desired_status: - changes['Status'] = desired_status - - if not changes: - return False - - if module.check_mode: - return True - - try: - client.update_access_key( - aws_retry=True, - UserName=user, - AccessKeyId=access_key_id, - **changes - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, changes=changes, - msg='Failed to update access key "{0}" for user "{1}"'.format(access_key_id, user), - ) - return True - - -def create_access_key(access_keys, user, rotate_keys, enabled): - changed = False - oldest_key = False - - if len(access_keys) > 1 and rotate_keys: - sorted_keys = sorted(list(access_keys), key=lambda k: access_keys[k].get('create_date', None)) - oldest_key = sorted_keys[0] - changed |= delete_access_key(access_keys, user, oldest_key) - - if module.check_mode: - if changed: - return dict(deleted_access_key=oldest_key) - return True - - try: - results = client.create_access_key(aws_retry=True, UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to create access key for user "{0}"'.format(user)) - results = camel_dict_to_snake_dict(results) - access_key = results.get('access_key') - access_key = normalize_boto3_result(access_key) - - # Update settings which can't be managed on creation - if enabled is False: - access_key_id = access_key['access_key_id'] - access_keys = {access_key_id: access_key} - update_access_key(access_keys, user, access_key_id, enabled) - access_key['status'] = 'Inactive' - - if oldest_key: - access_key['deleted_access_key'] = oldest_key - - return access_key - - -def get_access_keys(user): - try: - results = client.list_access_keys(aws_retry=True, UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg='Failed to get access keys for user "{0}"'.format(user) - ) - if not results: - return None - - results = camel_dict_to_snake_dict(results) - access_keys = results.get('access_key_metadata', []) - if not access_keys: - return [] - - access_keys = normalize_boto3_result(access_keys) - access_keys = {k['access_key_id']: k for k in access_keys} - return access_keys - - -def main(): - - global module - global client - - argument_spec = dict( - user_name=dict(required=True, type='str', aliases=['username']), - id=dict(required=False, type='str'), - state=dict(required=False, choices=['present', 'absent'], default='present'), - active=dict(required=False, type='bool', aliases=['enabled']), - rotate_keys=dict(required=False, type='bool', default=False), - ) - - required_if = [ - ['state', 'absent', ('id')], - ] - mutually_exclusive = [ - ['rotate_keys', 'id'], - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - client = module.client('iam', 
retry_decorator=AWSRetry.jittered_backoff()) - - changed = False - state = module.params.get('state') - user = module.params.get('user_name') - access_key_id = module.params.get('id') - rotate_keys = module.params.get('rotate_keys') - enabled = module.params.get('active') - - access_keys = get_access_keys(user) - results = dict() - - if state == 'absent': - changed |= delete_access_key(access_keys, user, access_key_id) - else: - # If we have an ID then we should try to update it - if access_key_id: - changed |= update_access_key(access_keys, user, access_key_id, enabled) - access_keys = get_access_keys(user) - results['access_key'] = access_keys.get(access_key_id, None) - # Otherwise we try to create a new one - else: - secret_key = create_access_key(access_keys, user, rotate_keys, enabled) - if isinstance(secret_key, bool): - changed |= secret_key - else: - changed = True - results['access_key_id'] = secret_key.get('access_key_id', None) - results['secret_access_key'] = secret_key.pop('secret_access_key', None) - results['deleted_access_key_id'] = secret_key.pop('deleted_access_key', None) - if secret_key: - results['access_key'] = secret_key - results = scrub_none_parameters(results) - - module.exit_json(changed=changed, **results) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/aws/plugins/modules/iam_access_key_info.py b/ansible_collections/community/aws/plugins/modules/iam_access_key_info.py deleted file mode 100644 index 91429eff9..000000000 --- a/ansible_collections/community/aws/plugins/modules/iam_access_key_info.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2021 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: iam_access_key_info -version_added: 2.1.0 -short_description: fetch information about AWS IAM User access keys -description: - - 'Fetches information AWS IAM user access keys.' - - 'Note: It is not possible to fetch the secret access key.' -author: Mark Chappell (@tremble) -options: - user_name: - description: - - The name of the IAM User to which the keys belong. - required: true - type: str - aliases: ['username'] - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Fetch Access keys for a user - community.aws.iam_access_key_info: - user_name: example_user -''' - -RETURN = r''' -access_key: - description: A dictionary containing all the access key information. - returned: When the key exists. - type: list - elements: dict - contains: - access_key_id: - description: The ID for the access key. - returned: success - type: str - sample: AKIA1EXAMPLE1EXAMPLE - create_date: - description: The date and time, in ISO 8601 date-time format, when the access key was created. - returned: success - type: str - sample: "2021-10-09T13:25:42+00:00" - user_name: - description: The name of the IAM user to which the key is attached. - returned: success - type: str - sample: example_user - status: - description: - - The status of the key. - - C(Active) means it can be used. - - C(Inactive) means it can not be used. 
- returned: success - type: str - sample: Inactive -''' - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - - -def get_access_keys(user): - try: - results = client.list_access_keys(aws_retry=True, UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg='Failed to get access keys for user "{0}"'.format(user) - ) - if not results: - return None - - results = camel_dict_to_snake_dict(results) - access_keys = results.get('access_key_metadata', []) - if not access_keys: - return [] - - access_keys = normalize_boto3_result(access_keys) - access_keys = sorted(access_keys, key=lambda d: d.get('create_date', None)) - return access_keys - - -def main(): - - global module - global client - - argument_spec = dict( - user_name=dict(required=True, type='str', aliases=['username']), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) - - changed = False - user = module.params.get('user_name') - access_keys = get_access_keys(user) - - module.exit_json(changed=changed, access_keys=access_keys) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/aws/plugins/modules/iam_group.py b/ansible_collections/community/aws/plugins/modules/iam_group.py deleted file mode 100644 index 31987ef1d..000000000 --- a/ansible_collections/community/aws/plugins/modules/iam_group.py +++ /dev/null @@ -1,433 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: iam_group -version_added: 1.0.0 -short_description: Manage AWS IAM groups -description: - - Manage AWS IAM groups. -author: -- Nick Aslanidis (@naslanidis) -- Maksym Postument (@infectsoldier) -options: - name: - description: - - The name of the group to create. - required: true - type: str - managed_policies: - description: - - A list of managed policy ARNs or friendly names to attach to the role. - - To embed an inline policy, use M(community.aws.iam_policy). - required: false - type: list - elements: str - default: [] - aliases: ['managed_policy'] - users: - description: - - A list of existing users to add as members of the group. - required: false - type: list - elements: str - default: [] - state: - description: - - Create or remove the IAM group. 
- required: true - choices: [ 'present', 'absent' ] - type: str - purge_policies: - description: - - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detatched. - required: false - default: false - type: bool - aliases: ['purge_policy', 'purge_managed_policies'] - purge_users: - description: - - When I(purge_users=true) users which are not included in I(users) will be detached. - required: false - default: false - type: bool -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Create a group - community.aws.iam_group: - name: testgroup1 - state: present - -- name: Create a group and attach a managed policy using its ARN - community.aws.iam_group: - name: testgroup1 - managed_policies: - - arn:aws:iam::aws:policy/AmazonSNSFullAccess - state: present - -- name: Create a group with users as members and attach a managed policy using its ARN - community.aws.iam_group: - name: testgroup1 - managed_policies: - - arn:aws:iam::aws:policy/AmazonSNSFullAccess - users: - - test_user1 - - test_user2 - state: present - -- name: Remove all managed policies from an existing group with an empty list - community.aws.iam_group: - name: testgroup1 - state: present - purge_policies: true - -- name: Remove all group members from an existing group - community.aws.iam_group: - name: testgroup1 - managed_policies: - - arn:aws:iam::aws:policy/AmazonSNSFullAccess - purge_users: true - state: present - -- name: Delete the group - community.aws.iam_group: - name: testgroup1 - state: absent - -''' -RETURN = r''' -iam_group: - description: dictionary containing all the group information including group membership - returned: success - type: complex - contains: - group: - description: dictionary containing all the group information - returned: success - type: complex - contains: - arn: - description: the Amazon Resource Name (ARN) specifying the group - type: str - sample: "arn:aws:iam::1234567890:group/testgroup1" - create_date: - description: the date and time, in ISO 8601 date-time format, when the group was created - type: str - sample: "2017-02-08T04:36:28+00:00" - group_id: - description: the stable and unique string identifying the group - type: str - sample: AGPA12345EXAMPLE54321 - group_name: - description: the friendly name that identifies the group - type: str - sample: testgroup1 - path: - description: the path to the group - type: str - sample: / - users: - description: list containing all the group members - returned: success - type: complex - contains: - arn: - description: the Amazon Resource Name (ARN) specifying the user - type: str - sample: "arn:aws:iam::1234567890:user/test_user1" - create_date: - description: the date and time, in ISO 8601 date-time format, when the user was created - type: str - sample: "2017-02-08T04:36:28+00:00" - user_id: - description: the stable and unique string identifying the user - type: str - sample: AIDA12345EXAMPLE54321 - user_name: - description: the friendly name that identifies the user - type: str - sample: testgroup1 - path: - description: the path to the user - type: str - sample: / -''' - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from 
ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - - -def compare_attached_group_policies(current_attached_policies, new_attached_policies): - - # If new_attached_policies is None it means we want to remove all policies - if len(current_attached_policies) > 0 and new_attached_policies is None: - return False - - current_attached_policies_arn_list = [] - for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy['PolicyArn']) - - if set(current_attached_policies_arn_list) == set(new_attached_policies): - return True - else: - return False - - -def compare_group_members(current_group_members, new_group_members): - - # If new_attached_policies is None it means we want to remove all policies - if len(current_group_members) > 0 and new_group_members is None: - return False - if set(current_group_members) == set(new_group_members): - return True - else: - return False - - -def convert_friendly_names_to_arns(connection, module, policy_names): - - if not any(not policy.startswith('arn:') for policy in policy_names if policy is not None): - return policy_names - allpolicies = {} - paginator = connection.get_paginator('list_policies') - policies = paginator.paginate().build_full_result()['Policies'] - - for policy in policies: - allpolicies[policy['PolicyName']] = policy['Arn'] - allpolicies[policy['Arn']] = policy['Arn'] - try: - return [allpolicies[policy] for policy in policy_names] - except KeyError as e: - module.fail_json(msg="Couldn't find policy: " + str(e)) - - -def create_or_update_group(connection, module): - - params = dict() - params['GroupName'] = module.params.get('name') - managed_policies = module.params.get('managed_policies') - users = module.params.get('users') - purge_users = module.params.get('purge_users') - purge_policies = module.params.get('purge_policies') - changed = False - if managed_policies: - managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) - - # Get group - try: - group = get_group(connection, module, params['GroupName']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get group") - - # If group is None, create it - if group is None: - # Check mode means we would create the group - if module.check_mode: - module.exit_json(changed=True) - - try: - group = connection.create_group(**params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create group") - - # Manage managed policies - current_attached_policies = get_attached_policy_list(connection, module, params['GroupName']) - if not compare_attached_group_policies(current_attached_policies, managed_policies): - current_attached_policies_arn_list = [] - for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy['PolicyArn']) - - # If managed_policies has a single empty element we want to remove all attached policies - if purge_policies: - # Detach policies not present - for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)): - changed = True - if not module.check_mode: - try: - connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach policy from group 
%s" % params['GroupName']) - # If there are policies to adjust that aren't in the current list, then things have changed - # Otherwise the only changes were in purging above - if set(managed_policies) - set(current_attached_policies_arn_list): - changed = True - # If there are policies in managed_policies attach each policy - if managed_policies != [None] and not module.check_mode: - for policy_arn in managed_policies: - try: - connection.attach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params['GroupName']) - - # Manage group memberships - try: - current_group_members = get_group(connection, module, params['GroupName'])['Users'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) - - current_group_members_list = [] - for member in current_group_members: - current_group_members_list.append(member['UserName']) - - if not compare_group_members(current_group_members_list, users): - - if purge_users: - for user in list(set(current_group_members_list) - set(users)): - # Ensure we mark things have changed if any user gets purged - changed = True - # Skip actions for check mode - if not module.check_mode: - try: - connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove user %s from group %s" % (user, params['GroupName'])) - # If there are users to adjust that aren't in the current list, then things have changed - # Otherwise the only changes were in purging above - if set(users) - set(current_group_members_list): - changed = True - # Skip actions for check mode - if users != [None] and not module.check_mode: - for user in users: - try: - connection.add_user_to_group(GroupName=params['GroupName'], UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params['GroupName'])) - if module.check_mode: - module.exit_json(changed=changed) - - # Get the group again - try: - group = get_group(connection, module, params['GroupName']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) - - module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group)) - - -def destroy_group(connection, module): - - params = dict() - params['GroupName'] = module.params.get('name') - - try: - group = get_group(connection, module, params['GroupName']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) - if group: - # Check mode means we would remove this group - if module.check_mode: - module.exit_json(changed=True) - - # Remove any attached policies otherwise deletion fails - try: - for policy in get_attached_policy_list(connection, module, params['GroupName']): - connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy['PolicyArn']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params['GroupName']) - - # Remove any users in the group 
otherwise deletion fails - current_group_members_list = [] - try: - current_group_members = get_group(connection, module, params['GroupName'])['Users'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) - for member in current_group_members: - current_group_members_list.append(member['UserName']) - for user in current_group_members_list: - try: - connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params['GroupName'])) - - try: - connection.delete_group(**params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't delete group %s" % params['GroupName']) - - else: - module.exit_json(changed=False) - - module.exit_json(changed=True) - - -@AWSRetry.exponential_backoff() -def get_group(connection, module, name): - try: - paginator = connection.get_paginator('get_group') - return paginator.paginate(GroupName=name).build_full_result() - except is_boto3_error_code('NoSuchEntity'): - return None - - -@AWSRetry.exponential_backoff() -def get_attached_policy_list(connection, module, name): - - try: - paginator = connection.get_paginator('list_attached_group_policies') - return paginator.paginate(GroupName=name).build_full_result()['AttachedPolicies'] - except is_boto3_error_code('NoSuchEntity'): - return None - - -def main(): - - argument_spec = dict( - name=dict(required=True), - managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'), - users=dict(default=[], type='list', elements='str'), - state=dict(choices=['present', 'absent'], required=True), - purge_users=dict(default=False, type='bool'), - purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']) - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - connection = module.client('iam') - - state = module.params.get("state") - - if state == 'present': - create_or_update_group(connection, module) - else: - destroy_group(connection, module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py b/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py deleted file mode 100644 index f86f019d5..000000000 --- a/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py +++ /dev/null @@ -1,371 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: iam_managed_policy -version_added: 1.0.0 -short_description: Manage User Managed IAM policies -description: - - Allows creating and removing managed IAM policies -options: - policy_name: - description: - - The name of the managed policy. - required: True - type: str - policy_description: - description: - - A helpful description of this policy, this value is immutable and only set when creating a new policy. - default: '' - type: str - policy: - description: - - A properly json formatted policy - type: json - make_default: - description: - - Make this revision the default revision. 
- default: True - type: bool - only_version: - description: - - Remove all other non default revisions, if this is used with C(make_default) it will result in all other versions of this policy being deleted. - type: bool - default: false - state: - description: - - Should this managed policy be present or absent. Set to absent to detach all entities from this policy and remove it if found. - default: present - choices: [ "present", "absent" ] - type: str - -author: "Dan Kozlowski (@dkhenry)" -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' - -EXAMPLES = r''' -# Create a policy -- name: Create IAM Managed Policy - community.aws.iam_managed_policy: - policy_name: "ManagedPolicy" - policy_description: "A Helpful managed policy" - policy: "{{ lookup('template', 'managed_policy.json.j2') }}" - state: present - -# Update a policy with a new default version -- name: Update an IAM Managed Policy with new default version - community.aws.iam_managed_policy: - policy_name: "ManagedPolicy" - policy: "{{ lookup('file', 'managed_policy_update.json') }}" - state: present - -# Update a policy with a new non default version -- name: Update an IAM Managed Policy with a non default version - community.aws.iam_managed_policy: - policy_name: "ManagedPolicy" - policy: - Version: "2012-10-17" - Statement: - - Effect: "Allow" - Action: "logs:CreateLogGroup" - Resource: "*" - make_default: false - state: present - -# Update a policy and make it the only version and the default version -- name: Update an IAM Managed Policy with default version as the only version - community.aws.iam_managed_policy: - policy_name: "ManagedPolicy" - policy: | - { - "Version": "2012-10-17", - "Statement":[{ - "Effect": "Allow", - "Action": "logs:PutRetentionPolicy", - "Resource": "*" - }] - } - only_version: true - state: present - -# Remove a policy -- name: Remove an existing IAM Managed Policy - community.aws.iam_managed_policy: - policy_name: "ManagedPolicy" - state: absent -''' - -RETURN = r''' -policy: - description: Returns the policy json structure, when state == absent this will return the value of the removed policy. 
- returned: success - type: complex - contains: {} - sample: '{ - "arn": "arn:aws:iam::aws:policy/AdministratorAccess " - "attachment_count": 0, - "create_date": "2017-03-01T15:42:55.981000+00:00", - "default_version_id": "v1", - "is_attachable": true, - "path": "/", - "policy_id": "ANPA1245EXAMPLE54321", - "policy_name": "AdministratorAccess", - "update_date": "2017-03-01T15:42:55.981000+00:00" - }' -''' - -import json - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils._text import to_native -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def list_policies_with_backoff(): - paginator = client.get_paginator('list_policies') - return paginator.paginate(Scope='Local').build_full_result() - - -def get_policy_by_name(name): - try: - response = list_policies_with_backoff() - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policies") - for policy in response['Policies']: - if policy['PolicyName'] == name: - return policy - return None - - -def delete_oldest_non_default_version(policy): - try: - versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] - if not v['IsDefaultVersion']] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policy versions") - versions.sort(key=lambda v: v['CreateDate'], reverse=True) - for v in versions[-1:]: - try: - client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy version") - - -# This needs to return policy_version, changed -def get_or_create_policy_version(policy, policy_document): - try: - versions = client.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policy versions") - - for v in versions: - try: - document = client.get_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])['PolicyVersion']['Document'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v['VersionId'])) - - if module.check_mode and compare_policies(document, json.loads(to_native(policy_document))): - return v, True - - # If the current policy matches the existing one - if not compare_policies(document, json.loads(to_native(policy_document))): - return v, False - - # No existing version so create one - # There is a service limit (typically 5) of policy versions. - # - # Rather than assume that it is 5, we'll try to create the policy - # and if that doesn't work, delete the oldest non default policy version - # and try again. 
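# Aside (illustrative sketch, not part of the removed module): the comment above
# describes a create-then-fallback strategy around the managed-policy version
# quota (typically five versions per policy). Condensed into a hypothetical
# helper, assuming a plain boto3 IAM client:
def create_policy_version_with_room(client, policy_arn, document):
    try:
        return client.create_policy_version(PolicyArn=policy_arn, PolicyDocument=document)["PolicyVersion"]
    except client.exceptions.LimitExceededException:
        # At the quota: drop the oldest non-default version, then retry once.
        versions = client.list_policy_versions(PolicyArn=policy_arn)["Versions"]
        oldest = sorted(
            (v for v in versions if not v["IsDefaultVersion"]),
            key=lambda v: v["CreateDate"],
        )[0]
        client.delete_policy_version(PolicyArn=policy_arn, VersionId=oldest["VersionId"])
        return client.create_policy_version(PolicyArn=policy_arn, PolicyDocument=document)["PolicyVersion"]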
- try: - version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] - return version, True - except is_boto3_error_code('LimitExceeded'): - delete_oldest_non_default_version(policy) - try: - version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] - return version, True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as second_e: - module.fail_json_aws(second_e, msg="Couldn't create policy version") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't create policy version") - - -def set_if_default(policy, policy_version, is_default): - if is_default and not policy_version['IsDefaultVersion']: - try: - client.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't set default policy version") - return True - return False - - -def set_if_only(policy, policy_version, is_only): - if is_only: - try: - versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])[ - 'Versions'] if not v['IsDefaultVersion']] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policy versions") - for v in versions: - try: - client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy version") - return len(versions) > 0 - return False - - -def detach_all_entities(policy, **kwargs): - try: - entities = client.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list entities for policy {0}".format(policy['PolicyName'])) - - for g in entities['PolicyGroups']: - try: - client.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g['GroupName'])) - for u in entities['PolicyUsers']: - try: - client.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u['UserName'])) - for r in entities['PolicyRoles']: - try: - client.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r['RoleName'])) - if entities['IsTruncated']: - detach_all_entities(policy, Marker=entities['Marker']) - - -def create_or_update_policy(existing_policy): - name = module.params.get('policy_name') - description = module.params.get('policy_description') - default = module.params.get('make_default') - only = module.params.get('only_version') - - policy = None - - if module.params.get('policy') is not None: - policy = json.dumps(json.loads(module.params.get('policy'))) - - if existing_policy is None: - if module.check_mode: - module.exit_json(changed=True) - - # Create policy when none already exists
- try: - rvalue = client.create_policy(PolicyName=name, Path='/', PolicyDocument=policy, Description=description) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name)) - - module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy'])) - else: - policy_version, changed = get_or_create_policy_version(existing_policy, policy) - changed = set_if_default(existing_policy, policy_version, default) or changed - changed = set_if_only(existing_policy, policy_version, only) or changed - - # If anything has changed we need to refresh the policy - if changed: - try: - updated_policy = client.get_policy(PolicyArn=existing_policy['Arn'])['Policy'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get policy") - - module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(updated_policy)) - else: - module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(existing_policy)) - - -def delete_policy(existing_policy): - # Check for existing policy - if existing_policy: - if module.check_mode: - module.exit_json(changed=True) - - # Detach policy - detach_all_entities(existing_policy) - # Delete Versions - try: - versions = client.list_policy_versions(PolicyArn=existing_policy['Arn'])['Versions'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policy versions") - for v in versions: - if not v['IsDefaultVersion']: - try: - client.delete_policy_version(PolicyArn=existing_policy['Arn'], VersionId=v['VersionId']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't delete policy version {0}".format(v['VersionId'])) - # Delete policy - try: - client.delete_policy(PolicyArn=existing_policy['Arn']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(existing_policy['PolicyName'])) - - # This is the one case where we will return the old policy - module.exit_json(changed=True, policy=camel_dict_to_snake_dict(existing_policy)) - else: - module.exit_json(changed=False, policy=None) - - -def main(): - global module - global client - - argument_spec = dict( - policy_name=dict(required=True), - policy_description=dict(default=''), - policy=dict(type='json'), - make_default=dict(type='bool', default=True), - only_version=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[['state', 'present', ['policy']]], - supports_check_mode=True - ) - - name = module.params.get('policy_name') - state = module.params.get('state') - - try: - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - existing_policy = get_policy_by_name(name) - - if state == 'present': - create_or_update_policy(existing_policy) - else: - delete_policy(existing_policy) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py b/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py deleted file mode 100644 index
16abae170..000000000 --- a/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: iam_mfa_device_info -version_added: 1.0.0 -short_description: List the MFA (Multi-Factor Authentication) devices registered for a user -description: - - List the MFA (Multi-Factor Authentication) devices registered for a user -author: Victor Costan (@pwnall) -options: - user_name: - description: - - The name of the user whose MFA devices will be listed - type: str -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' - -RETURN = """ -mfa_devices: - description: The MFA devices registered for the given user - returned: always - type: list - sample: - - enable_date: "2016-03-11T23:25:36+00:00" - serial_number: arn:aws:iam::123456789012:mfa/example - user_name: example - - enable_date: "2016-03-11T23:25:37+00:00" - serial_number: arn:aws:iam::123456789012:mfa/example - user_name: example -""" - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html -- name: List MFA devices - community.aws.iam_mfa_device_info: - register: mfa_devices - -# more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html -- name: Assume an existing role - community.aws.sts_assume_role: - mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}" - role_arn: "arn:aws:iam::123456789012:role/someRole" - role_session_name: "someRoleSession" - register: assumed_role -''' - -try: - import botocore - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule - - -def list_mfa_devices(connection, module): - user_name = module.params.get('user_name') - changed = False - - args = {} - if user_name is not None: - args['UserName'] = user_name - try: - response = connection.list_mfa_devices(**args) - except ClientError as e: - module.fail_json_aws(e, msg="Failed to list MFA devices") - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) - - -def main(): - argument_spec = dict( - user_name=dict(required=False, default=None), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - connection = module.client('iam') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - list_mfa_devices(connection, module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/aws/plugins/modules/iam_password_policy.py b/ansible_collections/community/aws/plugins/modules/iam_password_policy.py deleted file mode 100644 index 19614d26d..000000000 --- a/ansible_collections/community/aws/plugins/modules/iam_password_policy.py +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python - -# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com> -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - 
-from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: iam_password_policy -version_added: 1.0.0 -short_description: Update an IAM Password Policy -description: - - Updates the IAM Password Policy on a given AWS account. -author: - - "Aaron Smith (@slapula)" -options: - state: - description: - - Specifies the overall state of the password policy. - required: true - choices: ['present', 'absent'] - type: str - min_pw_length: - description: - - Minimum password length. - default: 6 - aliases: [minimum_password_length] - type: int - require_symbols: - description: - - Require symbols in password. - default: false - type: bool - require_numbers: - description: - - Require numbers in password. - default: false - type: bool - require_uppercase: - description: - - Require uppercase letters in password. - default: false - type: bool - require_lowercase: - description: - - Require lowercase letters in password. - default: false - type: bool - allow_pw_change: - description: - - Allow users to change their password. - default: false - type: bool - aliases: [allow_password_change] - pw_max_age: - description: - - Maximum age for a password in days. When this option is 0 then passwords - do not expire automatically. - default: 0 - aliases: [password_max_age] - type: int - pw_reuse_prevent: - description: - - Prevent re-use of passwords. - default: 0 - aliases: [password_reuse_prevent, prevent_reuse] - type: int - pw_expire: - description: - - Prevents users from changing an expired password. - default: false - type: bool - aliases: [password_expire, expire] -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' - -EXAMPLES = ''' -- name: Password policy for AWS account - community.aws.iam_password_policy: - state: present - min_pw_length: 8 - require_symbols: false - require_numbers: true - require_uppercase: true - require_lowercase: true - allow_pw_change: true - pw_max_age: 60 - pw_reuse_prevent: 5 - pw_expire: false -''' - -RETURN = ''' # ''' - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code - - -class IAMConnection(object): - def __init__(self, module): - try: - self.connection = module.resource('iam') - self.module = module - except Exception as e: - module.fail_json(msg="Failed to connect to AWS: %s" % str(e)) - - def policy_to_dict(self, policy): - policy_attributes = [ - 'allow_users_to_change_password', 'expire_passwords', 'hard_expiry', - 'max_password_age', 'minimum_password_length', 'password_reuse_prevention', - 'require_lowercase_characters', 'require_numbers', 'require_symbols', 'require_uppercase_characters' - ] - ret = {} - for attr in policy_attributes: - ret[attr] = getattr(policy, attr) - return ret - - def update_password_policy(self, module, policy): - min_pw_length = module.params.get('min_pw_length') - require_symbols = module.params.get('require_symbols') - require_numbers = module.params.get('require_numbers') - require_uppercase = module.params.get('require_uppercase') - require_lowercase = module.params.get('require_lowercase') - allow_pw_change = module.params.get('allow_pw_change') - pw_max_age = module.params.get('pw_max_age') - pw_reuse_prevent =
module.params.get('pw_reuse_prevent') - pw_expire = module.params.get('pw_expire') - - update_parameters = dict( - MinimumPasswordLength=min_pw_length, - RequireSymbols=require_symbols, - RequireNumbers=require_numbers, - RequireUppercaseCharacters=require_uppercase, - RequireLowercaseCharacters=require_lowercase, - AllowUsersToChangePassword=allow_pw_change, - HardExpiry=pw_expire - ) - if pw_reuse_prevent: - update_parameters.update(PasswordReusePrevention=pw_reuse_prevent) - if pw_max_age: - update_parameters.update(MaxPasswordAge=pw_max_age) - - try: - original_policy = self.policy_to_dict(policy) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - original_policy = {} - - try: - results = policy.update(**update_parameters) - policy.reload() - updated_policy = self.policy_to_dict(policy) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy") - - changed = (original_policy != updated_policy) - return (changed, updated_policy, camel_dict_to_snake_dict(results)) - - def delete_password_policy(self, policy): - try: - results = policy.delete() - except is_boto3_error_code('NoSuchEntity'): - self.module.exit_json(changed=False, task_status={'IAM': "Couldn't find IAM Password Policy"}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy") - return camel_dict_to_snake_dict(results) - - -def main(): - module = AnsibleAWSModule( - argument_spec={ - 'state': dict(choices=['present', 'absent'], required=True), - 'min_pw_length': dict(type='int', aliases=['minimum_password_length'], default=6), - 'require_symbols': dict(type='bool', default=False), - 'require_numbers': dict(type='bool', default=False), - 'require_uppercase': dict(type='bool', default=False), - 'require_lowercase': dict(type='bool', default=False), - 'allow_pw_change': dict(type='bool', aliases=['allow_password_change'], default=False), - 'pw_max_age': dict(type='int', aliases=['password_max_age'], default=0), - 'pw_reuse_prevent': dict(type='int', aliases=['password_reuse_prevent', 'prevent_reuse'], default=0), - 'pw_expire': dict(type='bool', aliases=['password_expire', 'expire'], default=False), - }, - supports_check_mode=True, - ) - - resource = IAMConnection(module) - policy = resource.connection.AccountPasswordPolicy() - - state = module.params.get('state') - - if state == 'present': - (changed, new_policy, update_result) = resource.update_password_policy(module, policy) - module.exit_json(changed=changed, task_status={'IAM': update_result}, policy=new_policy) - - if state == 'absent': - delete_result = resource.delete_password_policy(policy) - module.exit_json(changed=True, task_status={'IAM': delete_result}) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/aws/plugins/modules/iam_role.py b/ansible_collections/community/aws/plugins/modules/iam_role.py deleted file mode 100644 index 4add6a525..000000000 --- a/ansible_collections/community/aws/plugins/modules/iam_role.py +++ /dev/null @@ -1,736 +0,0 @@ -#!/usr/bin/python -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: iam_role -version_added: 1.0.0 -short_description: Manage AWS IAM roles 
-description: - - Manage AWS IAM roles. -author: - - "Rob White (@wimnat)" -options: - path: - description: - - The path to the role. For more information about paths, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html). - default: "/" - type: str - name: - description: - - The name of the role to create. - required: true - type: str - description: - description: - - Provides a description of the role. - type: str - boundary: - description: - - The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates. - - Boundaries cannot be set on Instance Profiles; as such, if this option is specified then I(create_instance_profile) must be C(false). - - This is intended for roles/users that have permissions to create new IAM objects. - - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html). - aliases: [boundary_policy_arn] - type: str - assume_role_policy_document: - description: - - The trust relationship policy document that grants an entity permission to assume the role. - - This parameter is required when I(state=present). - type: json - managed_policies: - description: - - A list of managed policy ARNs or friendly names. - - To remove all policies set I(purge_policies=true) and I(managed_policies=[None]). - - To embed an inline policy, use M(community.aws.iam_policy). - aliases: ['managed_policy'] - type: list - elements: str - max_session_duration: - description: - - The maximum duration (in seconds) of a session when assuming the role. - - Valid values are between 1 and 12 hours (3600 and 43200 seconds). - type: int - purge_policies: - description: - - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached. - type: bool - aliases: ['purge_policy', 'purge_managed_policies'] - default: true - state: - description: - - Create or remove the IAM role. - default: present - choices: [ present, absent ] - type: str - create_instance_profile: - description: - - Creates an IAM instance profile along with the role. - default: true - type: bool - delete_instance_profile: - description: - - When I(delete_instance_profile=true) and I(state=absent), deleting a role will also delete the instance - profile created with the same I(name) as the role. - - Only applies when I(state=absent). - default: false - type: bool - wait_timeout: - description: - - How long (in seconds) to wait for creation / update to complete. - default: 120 - type: int - wait: - description: - - When I(wait=True) the module will wait for up to I(wait_timeout) seconds - for IAM role creation before returning. - default: True - type: bool -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - - amazon.aws.tags -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details.
- -- name: Create a role with description and tags - community.aws.iam_role: - name: mynewrole - assume_role_policy_document: "{{ lookup('file','policy.json') }}" - description: This is My New Role - tags: - env: dev - -- name: "Create a role and attach a managed policy called 'PowerUserAccess'" - community.aws.iam_role: - name: mynewrole - assume_role_policy_document: "{{ lookup('file','policy.json') }}" - managed_policies: - - arn:aws:iam::aws:policy/PowerUserAccess - -- name: Keep the role created above but remove all managed policies - community.aws.iam_role: - name: mynewrole - assume_role_policy_document: "{{ lookup('file','policy.json') }}" - managed_policies: [] - -- name: Delete the role - community.aws.iam_role: - name: mynewrole - assume_role_policy_document: "{{ lookup('file', 'policy.json') }}" - state: absent - -''' -RETURN = r''' -iam_role: - description: dictionary containing the IAM Role data - returned: success - type: complex - contains: - path: - description: the path to the role - type: str - returned: always - sample: / - role_name: - description: the friendly name that identifies the role - type: str - returned: always - sample: myrole - role_id: - description: the stable and unique string identifying the role - type: str - returned: always - sample: ABCDEFF4EZ4ABCDEFV4ZC - arn: - description: the Amazon Resource Name (ARN) specifying the role - type: str - returned: always - sample: "arn:aws:iam::1234567890:role/mynewrole" - create_date: - description: the date and time, in ISO 8601 date-time format, when the role was created - type: str - returned: always - sample: "2016-08-14T04:36:28+00:00" - assume_role_policy_document: - description: - - the policy that grants an entity permission to assume the role - - | - note: the case of keys in this dictionary are currently converted from CamelCase to - snake_case. 
In a release after 2023-12-01 this behaviour will change - type: dict - returned: always - sample: { - 'statement': [ - { - 'action': 'sts:AssumeRole', - 'effect': 'Allow', - 'principal': { - 'service': 'ec2.amazonaws.com' - }, - 'sid': '' - } - ], - 'version': '2012-10-17' - } - assume_role_policy_document_raw: - description: the policy that grants an entity permission to assume the role - type: dict - returned: always - version_added: 5.3.0 - sample: { - 'Statement': [ - { - 'Action': 'sts:AssumeRole', - 'Effect': 'Allow', - 'Principal': { - 'Service': 'ec2.amazonaws.com' - }, - 'Sid': '' - } - ], - 'Version': '2012-10-17' - } - - attached_policies: - description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role - type: list - returned: always - sample: [ - { - 'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess', - 'policy_name': 'PowerUserAccess' - } - ] - tags: - description: role tags - type: dict - returned: always - sample: '{"Env": "Prod"}' -''' - -import json - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies - - -@AWSRetry.jittered_backoff() -def _list_policies(client): - paginator = client.get_paginator('list_policies') - return paginator.paginate().build_full_result()['Policies'] - - -def wait_iam_exists(module, client): - if module.check_mode: - return - if not module.params.get('wait'): - return - - role_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') - - delay = min(wait_timeout, 5) - max_attempts = wait_timeout // delay - - try: - waiter = client.get_waiter('role_exists') - waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, - RoleName=role_name, - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on IAM role creation') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on IAM role creation') - - -def convert_friendly_names_to_arns(module, client, policy_names): - if not any(not policy.startswith('arn:') for policy in policy_names): - return policy_names - - allpolicies = {} - policies = _list_policies(client) - - for policy in policies: - allpolicies[policy['PolicyName']] = policy['Arn'] - allpolicies[policy['Arn']] = policy['Arn'] - try: - return [allpolicies[policy] for policy in policy_names] - except KeyError as e: - module.fail_json_aws(e, msg="Couldn't find policy") - - -def attach_policies(module, client, policies_to_attach, role_name): - if module.check_mode and policies_to_attach: - return True - - changed = False - for policy_arn in policies_to_attach: - try: - client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn, aws_retry=True) - changed = True - except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, role_name)) - return changed - - -def remove_policies(module, client, policies_to_remove, role_name): - if module.check_mode and policies_to_remove: - return True - - changed = False - for policy in policies_to_remove: - try: - client.detach_role_policy(RoleName=role_name, PolicyArn=policy, aws_retry=True) - changed = True - except is_boto3_error_code('NoSuchEntityException'): - pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, role_name)) - return changed - - -def remove_inline_policies(module, client, role_name): - current_inline_policies = get_inline_policy_list(module, client, role_name) - for policy in current_inline_policies: - try: - client.delete_role_policy(RoleName=role_name, PolicyName=policy, aws_retry=True) - except is_boto3_error_code('NoSuchEntityException'): - pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete policy {0} embedded in {1}".format(policy, role_name)) - - -def generate_create_params(module): - params = dict() - params['Path'] = module.params.get('path') - params['RoleName'] = module.params.get('name') - params['AssumeRolePolicyDocument'] = module.params.get('assume_role_policy_document') - if module.params.get('description') is not None: - params['Description'] = module.params.get('description') - if module.params.get('max_session_duration') is not None: - params['MaxSessionDuration'] = module.params.get('max_session_duration') - if module.params.get('boundary') is not None: - params['PermissionsBoundary'] = module.params.get('boundary') - if module.params.get('tags') is not None: - params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) - - return params - - -def create_basic_role(module, client): - """ - Perform the Role creation. - Assumes tests for the role existing have already been performed. - """ - if module.check_mode: - module.exit_json(changed=True) - - try: - params = generate_create_params(module) - role = client.create_role(aws_retry=True, **params) - # 'Description' is documented as key of the role returned by create_role - # but appears to be an AWS bug (the value is not returned using the AWS CLI either). - # Get the role after creating it. 
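At the plain boto3 level, the workaround described in the comment above (and performed by get_role_with_backoff() on the next line) looks roughly like the following sketch; the role name, description, and trust policy are hypothetical, and this is an illustration rather than part of the module:

    import json
    import boto3

    iam = boto3.client("iam")
    trust_policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "ec2.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    })
    # create_role()'s response has been observed to omit 'Description',
    # so read the role back with get_role() to obtain the full record.
    iam.create_role(RoleName="example-role", AssumeRolePolicyDocument=trust_policy, Description="Example role")
    role = iam.get_role(RoleName="example-role")["Role"]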
- role = get_role_with_backoff(module, client, params['RoleName']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to create role") - - return role - - -def update_role_assumed_policy(module, client, role_name, target_assumed_policy, current_assumed_policy): - # Check Assumed Policy document - if target_assumed_policy is None or not compare_policies(current_assumed_policy, json.loads(target_assumed_policy)): - return False - - if module.check_mode: - return True - - try: - client.update_assume_role_policy( - RoleName=role_name, - PolicyDocument=target_assumed_policy, - aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(role_name)) - return True - - -def update_role_description(module, client, role_name, target_description, current_description): - # Check Description update - if target_description is None or current_description == target_description: - return False - - if module.check_mode: - return True - - try: - client.update_role(RoleName=role_name, Description=target_description, aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update description for role {0}".format(role_name)) - return True - - -def update_role_max_session_duration(module, client, role_name, target_duration, current_duration): - # Check MaxSessionDuration update - if target_duration is None or current_duration == target_duration: - return False - - if module.check_mode: - return True - - try: - client.update_role(RoleName=role_name, MaxSessionDuration=target_duration, aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(role_name)) - return True - - -def update_role_permissions_boundary(module, client, role_name, target_permissions_boundary, current_permissions_boundary): - # Check PermissionsBoundary - if target_permissions_boundary is None or target_permissions_boundary == current_permissions_boundary: - return False - - if module.check_mode: - return True - - if target_permissions_boundary == '': - try: - client.delete_role_permissions_boundary(RoleName=role_name, aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(role_name)) - else: - try: - client.put_role_permissions_boundary(RoleName=role_name, PermissionsBoundary=target_permissions_boundary, aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(role_name)) - return True - - -def update_managed_policies(module, client, role_name, managed_policies, purge_policies): - # Check Managed Policies - if managed_policies is None: - return False - - # Get list of current attached managed policies - current_attached_policies = get_attached_policy_list(module, client, role_name) - current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies] - - if len(managed_policies) == 1 and managed_policies[0] is None: - managed_policies = [] - - policies_to_remove = set(current_attached_policies_arn_list) - set(managed_policies) - 
policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list) - - changed = False - if purge_policies and policies_to_remove: - if module.check_mode: - return True - else: - changed |= remove_policies(module, client, policies_to_remove, role_name) - - if policies_to_attach: - if module.check_mode: - return True - else: - changed |= attach_policies(module, client, policies_to_attach, role_name) - - return changed - - -def create_or_update_role(module, client): - - role_name = module.params.get('name') - assumed_policy = module.params.get('assume_role_policy_document') - create_instance_profile = module.params.get('create_instance_profile') - description = module.params.get('description') - duration = module.params.get('max_session_duration') - path = module.params.get('path') - permissions_boundary = module.params.get('boundary') - purge_tags = module.params.get('purge_tags') - tags = ansible_dict_to_boto3_tag_list(module.params.get('tags')) if module.params.get('tags') else None - purge_policies = module.params.get('purge_policies') - managed_policies = module.params.get('managed_policies') - if managed_policies: - # Attempt to list the policies early so we don't leave things behind if we can't find them. - managed_policies = convert_friendly_names_to_arns(module, client, managed_policies) - - changed = False - - # Get role - role = get_role(module, client, role_name) - - # If role is None, create it - if role is None: - role = create_basic_role(module, client) - - if not module.check_mode and module.params.get('wait'): - wait_iam_exists(module, client) - - changed = True - else: - # Role exists - get current attributes - current_assumed_policy = role.get('AssumeRolePolicyDocument') - current_description = role.get('Description') - current_duration = role.get('MaxSessionDuration') - current_permissions_boundary = role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', '') - - # Update attributes - changed |= update_role_tags(module, client, role_name, tags, purge_tags) - changed |= update_role_assumed_policy(module, client, role_name, assumed_policy, current_assumed_policy) - changed |= update_role_description(module, client, role_name, description, current_description) - changed |= update_role_max_session_duration(module, client, role_name, duration, current_duration) - changed |= update_role_permissions_boundary(module, client, role_name, permissions_boundary, current_permissions_boundary) - - if not module.check_mode and module.params.get('wait'): - wait_iam_exists(module, client) - - if create_instance_profile: - changed |= create_instance_profiles(module, client, role_name, path) - - if not module.check_mode and module.params.get('wait'): - wait_iam_exists(module, client) - - changed |= update_managed_policies(module, client, role_name, managed_policies, purge_policies) - wait_iam_exists(module, client) - - # Get the role again - role = get_role(module, client, role_name) - role['AttachedPolicies'] = get_attached_policy_list(module, client, role_name) - role['tags'] = get_role_tags(module, client) - - camel_role = camel_dict_to_snake_dict(role, ignore_list=['tags']) - camel_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument", {}) - module.exit_json(changed=changed, iam_role=camel_role, **camel_role) - - -def create_instance_profiles(module, client, role_name, path): - - # Fetch existing Profiles - try: - instance_profiles = client.list_instance_profiles_for_role(RoleName=role_name, aws_retry=True)['InstanceProfiles'] - except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) - - # Profile already exists - if any(p['InstanceProfileName'] == role_name for p in instance_profiles): - return False - - if module.check_mode: - return True - - # Make sure an instance profile is created - try: - client.create_instance_profile(InstanceProfileName=role_name, Path=path, aws_retry=True) - except is_boto3_error_code('EntityAlreadyExists'): - # If the profile already exists, no problem, move on. - # Implies someone's changing things at the same time... - return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(role_name)) - - # And attach the role to the profile - try: - client.add_role_to_instance_profile(InstanceProfileName=role_name, RoleName=role_name, aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(role_name)) - - return True - - -def remove_instance_profiles(module, client, role_name): - delete_profiles = module.params.get("delete_instance_profile") - - try: - instance_profiles = client.list_instance_profiles_for_role(aws_retry=True, RoleName=role_name)['InstanceProfiles'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) - - # Remove the role from the instance profile(s) - for profile in instance_profiles: - profile_name = profile['InstanceProfileName'] - try: - if not module.check_mode: - client.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, RoleName=role_name) - if profile_name == role_name: - if delete_profiles: - try: - client.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True) - except is_boto3_error_code('NoSuchEntityException'): - pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name)) - - -def destroy_role(module, client): - - role_name = module.params.get('name') - role = get_role(module, client, role_name) - - if role is None: - module.exit_json(changed=False) - - if not module.check_mode: - # Before we try to delete the role we need to remove any - # - attached instance profiles - # - attached managed policies - # - embedded inline policies - remove_instance_profiles(module, client, role_name) - update_managed_policies(module, client, role_name, [], True) - remove_inline_policies(module, client, role_name) - try: - client.delete_role(aws_retry=True, RoleName=role_name) - except is_boto3_error_code('NoSuchEntityException'): - module.exit_json(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete role") - - module.exit_json(changed=True) - - -def get_role_with_backoff(module, client, name): - try: - return 
AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(client.get_role)(RoleName=name)['Role'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) - - -def get_role(module, client, name): - try: - return client.get_role(RoleName=name, aws_retry=True)['Role'] - except is_boto3_error_code('NoSuchEntity'): - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) - - -def get_attached_policy_list(module, client, name): - try: - return client.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) - - -def get_inline_policy_list(module, client, name): - try: - return client.list_role_policies(RoleName=name, aws_retry=True)['PolicyNames'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) - - -def get_role_tags(module, client): - role_name = module.params.get('name') - try: - return boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name)) - - -def update_role_tags(module, client, role_name, new_tags, purge_tags): - if new_tags is None: - return False - new_tags = boto3_tag_list_to_ansible_dict(new_tags) - - try: - existing_tags = boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError): - existing_tags = {} - - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) - - if not module.check_mode: - try: - if tags_to_remove: - client.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True) - if tags_to_add: - client.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name) - - changed = bool(tags_to_add) or bool(tags_to_remove) - return changed - - -def main(): - - argument_spec = dict( - name=dict(type='str', required=True), - path=dict(type='str', default="/"), - assume_role_policy_document=dict(type='json'), - managed_policies=dict(type='list', aliases=['managed_policy'], elements='str'), - max_session_duration=dict(type='int'), - state=dict(type='str', choices=['present', 'absent'], default='present'), - description=dict(type='str'), - boundary=dict(type='str', aliases=['boundary_policy_arn']), - create_instance_profile=dict(type='bool', default=True), - delete_instance_profile=dict(type='bool', default=False), - purge_policies=dict(default=True, type='bool', aliases=['purge_policy', 'purge_managed_policies']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=120, type='int'), - ) - - module = 
AnsibleAWSModule(argument_spec=argument_spec, - required_if=[('state', 'present', ['assume_role_policy_document'])], - supports_check_mode=True) - - module.deprecate("All return values other than iam_role and changed have been deprecated and " - "will be removed in a release after 2023-12-01.", - date="2023-12-01", collection_name="community.aws") - - module.deprecate("In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document " - "will no longer be converted from CamelCase to snake_case. The " - "iam_role.assume_role_policy_document_raw return value already returns the " - "policy document in this future format.", - date="2023-12-01", collection_name="community.aws") - - if module.params.get('boundary'): - if module.params.get('create_instance_profile'): - module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.") - if not module.params.get('boundary').startswith('arn:aws:iam'): - module.fail_json(msg="Boundary policy must be an ARN") - if module.params.get('max_session_duration'): - max_session_duration = module.params.get('max_session_duration') - if max_session_duration < 3600 or max_session_duration > 43200: - module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)") - if module.params.get('path'): - path = module.params.get('path') - if not path.endswith('/') or not path.startswith('/'): - module.fail_json(msg="path must begin and end with /") - - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) - - state = module.params.get("state") - - if state == 'present': - create_or_update_role(module, client) - elif state == 'absent': - destroy_role(module, client) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/aws/plugins/modules/iam_role_info.py b/ansible_collections/community/aws/plugins/modules/iam_role_info.py deleted file mode 100644 index d66be487a..000000000 --- a/ansible_collections/community/aws/plugins/modules/iam_role_info.py +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: iam_role_info -version_added: 1.0.0 -short_description: Gather information on IAM roles -description: - - Gathers information about IAM roles. -author: - - "Will Thames (@willthames)" -options: - name: - description: - - Name of a role to search for. - - Mutually exclusive with I(path_prefix). - aliases: - - role_name - type: str - path_prefix: - description: - - Prefix of role to restrict IAM role search for. - - Mutually exclusive with I(name). - type: str -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' - -EXAMPLES = ''' -- name: find all existing IAM roles - community.aws.iam_role_info: - register: result - -- name: describe a single role - community.aws.iam_role_info: - name: MyIAMRole - -- name: describe all roles matching a path prefix - community.aws.iam_role_info: - path_prefix: /application/path -''' - -RETURN = ''' -iam_roles: - description: List of IAM roles - returned: always - type: complex - contains: - arn: - description: Amazon Resource Name for IAM role. 
- returned: always - type: str - sample: arn:aws:iam::123456789012:role/AnsibleTestRole - assume_role_policy_document: - description: - - The policy that grants an entity permission to assume the role - - | - Note: the case of keys in this dictionary are currently converted from CamelCase to - snake_case. In a release after 2023-12-01 this behaviour will change. - returned: always - type: dict - assume_role_policy_document_raw: - description: The policy document describing what can assume the role. - returned: always - type: dict - version_added: 5.3.0 - create_date: - description: Date IAM role was created. - returned: always - type: str - sample: '2017-10-23T00:05:08+00:00' - inline_policies: - description: List of names of inline policies. - returned: always - type: list - sample: [] - managed_policies: - description: List of attached managed policies. - returned: always - type: complex - contains: - policy_arn: - description: Amazon Resource Name for the policy. - returned: always - type: str - sample: arn:aws:iam::123456789012:policy/AnsibleTestEC2Policy - policy_name: - description: Name of managed policy. - returned: always - type: str - sample: AnsibleTestEC2Policy - instance_profiles: - description: List of attached instance profiles. - returned: always - type: complex - contains: - arn: - description: Amazon Resource Name for the instance profile. - returned: always - type: str - sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestEC2Policy - create_date: - description: Date instance profile was created. - returned: always - type: str - sample: '2017-10-23T00:05:08+00:00' - instance_profile_id: - description: Amazon Identifier for the instance profile. - returned: always - type: str - sample: AROAII7ABCD123456EFGH - instance_profile_name: - description: Name of instance profile. - returned: always - type: str - sample: AnsibleTestEC2Policy - path: - description: Path of instance profile. - returned: always - type: str - sample: / - roles: - description: List of roles associated with this instance profile. - returned: always - type: list - sample: [] - path: - description: Path of role. - returned: always - type: str - sample: / - role_id: - description: Amazon Identifier for the role. - returned: always - type: str - sample: AROAII7ABCD123456EFGH - role_name: - description: Name of the role. - returned: always - type: str - sample: AnsibleTestRole - tags: - description: Role tags. 
- type: dict - returned: always - sample: '{"Env": "Prod"}' -''' - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict - - -@AWSRetry.jittered_backoff() -def list_iam_roles_with_backoff(client, **kwargs): - paginator = client.get_paginator('list_roles') - return paginator.paginate(**kwargs).build_full_result() - - -@AWSRetry.jittered_backoff() -def list_iam_role_policies_with_backoff(client, role_name): - paginator = client.get_paginator('list_role_policies') - return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames'] - - -@AWSRetry.jittered_backoff() -def list_iam_attached_role_policies_with_backoff(client, role_name): - paginator = client.get_paginator('list_attached_role_policies') - return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies'] - - -@AWSRetry.jittered_backoff() -def list_iam_instance_profiles_for_role_with_backoff(client, role_name): - paginator = client.get_paginator('list_instance_profiles_for_role') - return paginator.paginate(RoleName=role_name).build_full_result()['InstanceProfiles'] - - -def describe_iam_role(module, client, role): - name = role['RoleName'] - try: - role['InlinePolicies'] = list_iam_role_policies_with_backoff(client, name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get inline policies for role %s" % name) - try: - role['ManagedPolicies'] = list_iam_attached_role_policies_with_backoff(client, name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get managed policies for role %s" % name) - try: - role['InstanceProfiles'] = list_iam_instance_profiles_for_role_with_backoff(client, name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get instance profiles for role %s" % name) - try: - role['tags'] = boto3_tag_list_to_ansible_dict(role['Tags']) - del role['Tags'] - except KeyError: - role['tags'] = {} - return role - - -def describe_iam_roles(module, client): - name = module.params['name'] - path_prefix = module.params['path_prefix'] - if name: - try: - roles = [client.get_role(RoleName=name, aws_retry=True)['Role']] - except is_boto3_error_code('NoSuchEntity'): - return [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name) - else: - params = dict() - if path_prefix: - if not path_prefix.startswith('/'): - path_prefix = '/' + path_prefix - if not path_prefix.endswith('/'): - path_prefix = path_prefix + '/' - params['PathPrefix'] = path_prefix - try: - roles = list_iam_roles_with_backoff(client, **params)['Roles'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list IAM roles") - return [normalize_role(describe_iam_role(module, client, role)) for role in roles] - - -def normalize_profile(profile): - 
new_profile = camel_dict_to_snake_dict(profile) - if profile.get("Roles"): - new_profile["roles"] = [normalize_role(role) for role in profile.get("Roles")] - return new_profile - - -def normalize_role(role): - new_role = camel_dict_to_snake_dict(role, ignore_list=['tags']) - new_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument") - if role.get("InstanceProfiles"): - new_role["instance_profiles"] = [normalize_profile(profile) for profile in role.get("InstanceProfiles")] - return new_role - - -def main(): - """ - Module action handler - """ - argument_spec = dict( - name=dict(aliases=['role_name']), - path_prefix=dict(), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['name', 'path_prefix']]) - - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) - - module.deprecate("In a release after 2023-12-01 the contents of assume_role_policy_document " - "will no longer be converted from CamelCase to snake_case. The " - ".assume_role_policy_document_raw return value already returns the " - "policy document in this future format.", - date="2023-12-01", collection_name="community.aws") - - module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py b/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py index f79e4c2c6..acaaa38fc 100644 --- a/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py +++ b/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py @@ -1,25 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" --- module: iam_saml_federation version_added: 1.0.0 @@ -42,17 +27,18 @@ options: default: present choices: [ "present", "absent" ] type: str -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 author: - Tony (@axc450) - Aidan Rowe (@aidan-) -''' -EXAMPLES = ''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # It is assumed that their matching environment variables are set.
# Creates a new iam saml identity provider if not present @@ -74,9 +60,9 @@ EXAMPLES = ''' community.aws.iam_saml_federation: name: example3 state: absent -''' +""" -RETURN = ''' +RETURN = r""" saml_provider: description: Details of the SAML Identity Provider that was created/modified. type: complex @@ -101,15 +87,16 @@ saml_provider: type: str returned: present sample: "2017-02-08T04:36:28+00:00" -''' +""" try: - import botocore.exceptions + import botocore except ImportError: pass -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class SAMLProviderManager: @@ -119,7 +106,7 @@ class SAMLProviderManager: self.module = module try: - self.conn = module.client('iam') + self.conn = module.client("iam") except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Unknown AWS SDK error") @@ -146,10 +133,10 @@ class SAMLProviderManager: def _get_provider_arn(self, name): providers = self._list_saml_providers() - for p in providers['SAMLProviderList']: - provider_name = p['Arn'].split('/', 1)[1] + for p in providers["SAMLProviderList"]: + provider_name = p["Arn"].split("/", 1)[1] if name == provider_name: - return p['Arn'] + return p["Arn"] return None @@ -157,55 +144,55 @@ class SAMLProviderManager: if not metadata: self.module.fail_json(msg="saml_metadata_document must be defined for present state") - res = {'changed': False} + res = {"changed": False} try: arn = self._get_provider_arn(name) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not get the ARN of the identity provider '{name}'") if arn: # see if metadata needs updating try: resp = self._get_saml_provider(arn) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not retrieve the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not retrieve the identity provider '{name}'") - if metadata.strip() != resp['SAMLMetadataDocument'].strip(): + if metadata.strip() != resp["SAMLMetadataDocument"].strip(): # provider needs updating - res['changed'] = True + res["changed"] = True if not self.module.check_mode: try: resp = self._update_saml_provider(arn, metadata) - res['saml_provider'] = self._build_res(resp['SAMLProviderArn']) + res["saml_provider"] = self._build_res(resp["SAMLProviderArn"]) except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Could not update the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not update the identity provider '{name}'") else: - res['saml_provider'] = self._build_res(arn) + res["saml_provider"] = self._build_res(arn) else: # create - res['changed'] = True + res["changed"] = True if not self.module.check_mode: try: resp = self._create_saml_provider(metadata, name) - res['saml_provider'] = self._build_res(resp['SAMLProviderArn']) + res["saml_provider"] = self._build_res(resp["SAMLProviderArn"]) except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Could not create the identity provider '{0}'".format(name)) + 
self.module.fail_json_aws(e, msg=f"Could not create the identity provider '{name}'") self.module.exit_json(**res) def delete_saml_provider(self, name): - res = {'changed': False} + res = {"changed": False} try: arn = self._get_provider_arn(name) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not get the ARN of the identity provider '{name}'") if arn: # delete - res['changed'] = True + res["changed"] = True if not self.module.check_mode: try: self._delete_saml_provider(arn) except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Could not delete the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not delete the identity provider '{name}'") self.module.exit_json(**res) @@ -215,7 +202,7 @@ class SAMLProviderManager: "arn": arn, "metadata_document": saml_provider["SAMLMetadataDocument"], "create_date": saml_provider["CreateDate"].isoformat(), - "expire_date": saml_provider["ValidUntil"].isoformat() + "expire_date": saml_provider["ValidUntil"].isoformat(), } @@ -223,26 +210,26 @@ def main(): argument_spec = dict( name=dict(required=True), saml_metadata_document=dict(default=None, required=False), - state=dict(default='present', required=False, choices=['present', 'absent']), + state=dict(default="present", required=False, choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - required_if=[('state', 'present', ['saml_metadata_document'])] + required_if=[("state", "present", ["saml_metadata_document"])], ) - name = module.params['name'] - state = module.params.get('state') - saml_metadata_document = module.params.get('saml_metadata_document') + name = module.params["name"] + state = module.params.get("state") + saml_metadata_document = module.params.get("saml_metadata_document") sp_man = SAMLProviderManager(module) - if state == 'present': + if state == "present": sp_man.create_or_update_saml_provider(name, saml_metadata_document) - elif state == 'absent': + elif state == "absent": sp_man.delete_saml_provider(name) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py b/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py index f3d5c5808..6a7734aca 100644 --- a/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py +++ b/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py @@ -1,24 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
- -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" --- module: iam_server_certificate version_added: 1.0.0 @@ -76,12 +62,14 @@ options: author: - Jonathan I. Davila (@defionscode) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" + +RETURN = r""" # """ -EXAMPLES = ''' +EXAMPLES = r""" - name: Basic server certificate upload from local file community.aws.iam_server_certificate: name: very_ssl @@ -104,7 +92,7 @@ EXAMPLES = ''' name: very_ssl new_name: new_very_ssl state: present -''' +""" try: import botocore @@ -113,29 +101,30 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule @AWSRetry.jittered_backoff() def _list_server_certficates(): - paginator = client.get_paginator('list_server_certificates') - return paginator.paginate().build_full_result()['ServerCertificateMetadataList'] + paginator = client.get_paginator("list_server_certificates") + return paginator.paginate().build_full_result()["ServerCertificateMetadataList"] def check_duplicate_cert(new_cert): - orig_cert_names = list(c['ServerCertificateName'] for c in _list_server_certficates()) + orig_cert_names = list(c["ServerCertificateName"] for c in _list_server_certficates()) for cert_name in orig_cert_names: cert = get_server_certificate(cert_name) if not cert: continue - cert_body = cert.get('certificate_body', None) + cert_body = cert.get("certificate_body", None) if not _compare_cert(new_cert, cert_body): continue module.fail_json( changed=False, - msg='This certificate already exists under the name {0} and dup_ok=False'.format(cert_name), + msg=f"This certificate already exists under the name {cert_name} and dup_ok=False", duplicate_cert=cert, ) @@ -148,25 +137,25 @@ def _compare_cert(cert_a, cert_b): # Trim out the whitespace before comparing the certs. 
While this could mean
    # an invalid cert 'matches' a valid cert, that's better than some stray
    # whitespace breaking things
-    cert_a.replace('\r', '')
-    cert_a.replace('\n', '')
-    cert_a.replace(' ', '')
-    cert_b.replace('\r', '')
-    cert_b.replace('\n', '')
-    cert_b.replace(' ', '')
+    cert_a = cert_a.replace("\r", "")
+    cert_a = cert_a.replace("\n", "")
+    cert_a = cert_a.replace(" ", "")
+    cert_b = cert_b.replace("\r", "")
+    cert_b = cert_b.replace("\n", "")
+    cert_b = cert_b.replace(" ", "")

    return cert_a == cert_b


def update_server_certificate(current_cert):
    changed = False
-    cert = module.params.get('cert')
-    cert_chain = module.params.get('cert_chain')
+    cert = module.params.get("cert")
+    cert_chain = module.params.get("cert_chain")

-    if not _compare_cert(cert, current_cert.get('certificate_body', None)):
-        module.fail_json(msg='Modifying the certificate body is not supported by AWS')
-    if not _compare_cert(cert_chain, current_cert.get('certificate_chain', None)):
-        module.fail_json(msg='Modifying the chaining certificate is not supported by AWS')
+    if not _compare_cert(cert, current_cert.get("certificate_body", None)):
+        module.fail_json(msg="Modifying the certificate body is not supported by AWS")
+    if not _compare_cert(cert_chain, current_cert.get("certificate_chain", None)):
+        module.fail_json(msg="Modifying the chaining certificate is not supported by AWS")

    # We can't compare keys.

    if module.check_mode:
@@ -179,15 +168,15 @@ def update_server_certificate(current_cert):


def create_server_certificate():
-    cert = module.params.get('cert')
-    key = module.params.get('key')
-    cert_chain = module.params.get('cert_chain')
+    cert = module.params.get("cert")
+    key = module.params.get("key")
+    cert_chain = module.params.get("cert_chain")

-    if not module.params.get('dup_ok'):
+    if not module.params.get("dup_ok"):
        check_duplicate_cert(cert)

-    path = module.params.get('path')
-    name = module.params.get('name')
+    path = module.params.get("path")
+    name = module.params.get("name")

    params = dict(
        ServerCertificateName=name,
@@ -196,28 +185,25 @@ def create_server_certificate():
    )

    if cert_chain:
-        params['CertificateChain'] = cert_chain
+        params["CertificateChain"] = cert_chain
    if path:
-        params['Path'] = path
+        params["Path"] = path

    if module.check_mode:
        return True

    try:
-        client.upload_server_certificate(
-            aws_retry=True,
-            **params
-        )
+        client.upload_server_certificate(aws_retry=True, **params)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Failed to update server certificate {0}'.format(name))
+        module.fail_json_aws(e, msg=f"Failed to upload server certificate {name}")

    return True


def rename_server_certificate(current_cert):
-    name = module.params.get('name')
-    new_name = module.params.get('new_name')
-    new_path = module.params.get('new_path')
+    name = module.params.get("name")
+    new_name = module.params.get("new_name")
+    new_path = module.params.get("new_path")

    changes = dict()

@@ -226,16 +212,16 @@ def rename_server_certificate(current_cert):
        current_cert = get_server_certificate(new_name)
    else:
        if new_name:
-            changes['NewServerCertificateName'] = new_name
+            changes["NewServerCertificateName"] = new_name

-    cert_metadata = current_cert.get('server_certificate_metadata', {})
+    cert_metadata = current_cert.get("server_certificate_metadata", {})

    if not current_cert:
-        module.fail_json(msg='Unable to find certificate {0}'.format(name))
+        module.fail_json(msg=f"Unable to find certificate {name}")

-    current_path = cert_metadata.get('path', None)
+    current_path = 
cert_metadata.get("path", None) if new_path and current_path != new_path: - changes['NewPath'] = new_path + changes["NewPath"] = new_path if not changes: return False @@ -244,14 +230,9 @@ def rename_server_certificate(current_cert): return True try: - client.update_server_certificate( - aws_retry=True, - ServerCertificateName=name, - **changes - ) + client.update_server_certificate(aws_retry=True, ServerCertificateName=name, **changes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to update server certificate {0}'.format(name), - changes=changes) + module.fail_json_aws(e, msg=f"Failed to update server certificate {name}", changes=changes) return True @@ -263,17 +244,20 @@ def delete_server_certificate(current_cert): if module.check_mode: return True - name = module.params.get('name') + name = module.params.get("name") try: result = client.delete_server_certificate( aws_retry=True, ServerCertificateName=name, ) - except is_boto3_error_code('NoSuchEntity'): + except is_boto3_error_code("NoSuchEntity"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to delete server certificate {0}'.format(name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Failed to delete server certificate {name}") return True @@ -286,11 +270,14 @@ def get_server_certificate(name): aws_retry=True, ServerCertificateName=name, ) - except is_boto3_error_code('NoSuchEntity'): + except is_boto3_error_code("NoSuchEntity"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to get server certificate {0}'.format(name)) - cert = dict(camel_dict_to_snake_dict(result.get('ServerCertificate'))) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Failed to get server certificate {name}") + cert = dict(camel_dict_to_snake_dict(result.get("ServerCertificate"))) return cert @@ -300,75 +287,74 @@ def compatability_results(current_cert): if not current_cert: return compat_results - metadata = current_cert.get('server_certificate_metadata', {}) - - if current_cert.get('certificate_body', None): - compat_results['cert_body'] = current_cert.get('certificate_body') - if current_cert.get('certificate_chain', None): - compat_results['chain_cert_body'] = current_cert.get('certificate_chain') - if metadata.get('arn', None): - compat_results['arn'] = metadata.get('arn') - if metadata.get('expiration', None): - compat_results['expiration_date'] = metadata.get('expiration') - if metadata.get('path', None): - compat_results['cert_path'] = metadata.get('path') - if metadata.get('server_certificate_name', None): - compat_results['name'] = metadata.get('server_certificate_name') - if metadata.get('upload_date', None): - compat_results['upload_date'] = metadata.get('upload_date') + metadata = current_cert.get("server_certificate_metadata", {}) + + if current_cert.get("certificate_body", None): + compat_results["cert_body"] = current_cert.get("certificate_body") + if current_cert.get("certificate_chain", None): + compat_results["chain_cert_body"] = current_cert.get("certificate_chain") + if metadata.get("arn", None): + 
compat_results["arn"] = metadata.get("arn") + if metadata.get("expiration", None): + compat_results["expiration_date"] = metadata.get("expiration") + if metadata.get("path", None): + compat_results["cert_path"] = metadata.get("path") + if metadata.get("server_certificate_name", None): + compat_results["name"] = metadata.get("server_certificate_name") + if metadata.get("upload_date", None): + compat_results["upload_date"] = metadata.get("upload_date") return compat_results def main(): - global module global client argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(required=True), cert=dict(), key=dict(no_log=True), cert_chain=dict(), new_name=dict(), - path=dict(default='/'), + path=dict(default="/"), new_path=dict(), - dup_ok=dict(type='bool', default=True), + dup_ok=dict(type="bool", default=True), ) module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['new_path', 'key'], - ['new_path', 'cert'], - ['new_path', 'cert_chain'], - ['new_name', 'key'], - ['new_name', 'cert'], - ['new_name', 'cert_chain'], + ["new_path", "key"], + ["new_path", "cert"], + ["new_path", "cert_chain"], + ["new_name", "key"], + ["new_name", "cert"], + ["new_name", "cert_chain"], ], supports_check_mode=True, ) - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) - state = module.params.get('state') - name = module.params.get('name') - path = module.params.get('path') - new_name = module.params.get('new_name') - new_path = module.params.get('new_path') - dup_ok = module.params.get('dup_ok') + state = module.params.get("state") + name = module.params.get("name") + path = module.params.get("path") + new_name = module.params.get("new_name") + new_path = module.params.get("new_path") + dup_ok = module.params.get("dup_ok") current_cert = get_server_certificate(name) results = dict() - if state == 'absent': + if state == "absent": changed = delete_server_certificate(current_cert) if changed: - results['deleted_cert'] = name + results["deleted_cert"] = name else: - msg = 'Certificate with the name {0} already absent'.format(name) - results['msg'] = msg + msg = f"Certificate with the name {name} already absent" + results["msg"] = msg else: if new_name or new_path: changed = rename_server_certificate(current_cert) @@ -382,16 +368,13 @@ def main(): changed = create_server_certificate() updated_cert = get_server_certificate(name) - results['server_certificate'] = updated_cert + results["server_certificate"] = updated_cert compat_results = compatability_results(updated_cert) if compat_results: results.update(compat_results) - module.exit_json( - changed=changed, - **results - ) + module.exit_json(changed=changed, **results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py b/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py index ee0dc590d..5504cb746 100644 --- a/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py +++ b/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py @@ -1,32 +1,30 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, 
print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: iam_server_certificate_info version_added: 1.0.0 short_description: Retrieve the information of a server certificate description: - Retrieve the attributes of a server certificate. -author: "Allen Sanabria (@linuxdynasty)" +author: + - "Allen Sanabria (@linuxdynasty)" options: name: description: - The name of the server certificate you are retrieving attributes for. type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Retrieve server certificate community.aws.iam_server_certificate_info: name: production-cert @@ -37,9 +35,9 @@ EXAMPLES = ''' name: production-cert register: server_cert failed_when: "{{ server_cert.results | length == 0 }}" -''' +""" -RETURN = ''' +RETURN = r""" server_certificate_id: description: The 21 character certificate id returned: success @@ -75,16 +73,15 @@ upload_date: returned: success type: str sample: "2015-04-25T00:36:40+00:00" -''' +""" try: import botocore - import botocore.exceptions except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_server_certs(iam, name=None): @@ -113,22 +110,24 @@ def get_server_certs(iam, name=None): results = dict() try: if name: - server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']] + server_certs = [iam.get_server_certificate(ServerCertificateName=name)["ServerCertificate"]] else: - server_certs = iam.list_server_certificates()['ServerCertificateMetadataList'] + server_certs = iam.list_server_certificates()["ServerCertificateMetadataList"] for server_cert in server_certs: if not name: - server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate'] - cert_md = server_cert['ServerCertificateMetadata'] - results[cert_md['ServerCertificateName']] = { - 'certificate_body': server_cert['CertificateBody'], - 'server_certificate_id': cert_md['ServerCertificateId'], - 'server_certificate_name': cert_md['ServerCertificateName'], - 'arn': cert_md['Arn'], - 'path': cert_md['Path'], - 'expiration': cert_md['Expiration'].isoformat(), - 'upload_date': cert_md['UploadDate'].isoformat(), + server_cert = iam.get_server_certificate(ServerCertificateName=server_cert["ServerCertificateName"])[ + "ServerCertificate" + ] + cert_md = server_cert["ServerCertificateMetadata"] + results[cert_md["ServerCertificateName"]] = { + "certificate_body": server_cert["CertificateBody"], + "server_certificate_id": cert_md["ServerCertificateId"], + "server_certificate_name": cert_md["ServerCertificateName"], + "arn": cert_md["Arn"], + "path": cert_md["Path"], + "expiration": cert_md["Expiration"].isoformat(), + "upload_date": cert_md["UploadDate"].isoformat(), } except botocore.exceptions.ClientError: @@ -139,7 +138,7 @@ def get_server_certs(iam, name=None): def main(): argument_spec = dict( - name=dict(type='str'), + name=dict(type="str"), ) module = AnsibleAWSModule( @@ -148,14 +147,14 @@ def main(): ) try: - iam = module.client('iam') + iam = module.client("iam") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to 
connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - cert_name = module.params.get('name') + cert_name = module.params.get("name") results = get_server_certs(iam, cert_name) module.exit_json(results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/inspector_target.py b/ansible_collections/community/aws/plugins/modules/inspector_target.py index 2ec9e9a0e..f9ec6d53a 100644 --- a/ansible_collections/community/aws/plugins/modules/inspector_target.py +++ b/ansible_collections/community/aws/plugins/modules/inspector_target.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2018 Dennis Conrad for Sainsbury's # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: inspector_target version_added: 1.0.0 @@ -39,12 +37,12 @@ options: - Required if I(state=present). type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create my_target Assessment Target community.aws.inspector_target: name: my_target @@ -62,9 +60,9 @@ EXAMPLES = ''' community.aws.inspector_target: name: my_target state: absent -''' +""" -RETURN = ''' +RETURN = r""" arn: description: The ARN that specifies the Amazon Inspector assessment target. returned: success @@ -97,32 +95,32 @@ updated_at: returned: success type: str sample: "2018-01-29T13:48:51.958000+00:00" -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - ansible_dict_to_boto3_tag_list, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - compare_aws_tags, -) +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def main(): argument_spec = dict( name=dict(required=True), - state=dict(choices=['absent', 'present'], default='present'), - tags=dict(type='dict'), + state=dict(choices=["absent", "present"], default="present"), + tags=dict(type="dict"), ) - required_if = [['state', 'present', ['tags']]] + required_if = [["state", "present", ["tags"]]] module = AnsibleAWSModule( argument_spec=argument_spec, @@ -130,29 +128,37 @@ def main(): required_if=required_if, ) - name = module.params.get('name') - state = module.params.get('state').lower() - tags = module.params.get('tags') + name = module.params.get("name") + state = module.params.get("state").lower() + tags = module.params.get("tags") if tags: - tags = 
ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + tags = ansible_dict_to_boto3_tag_list(tags, "key", "value") - client = module.client('inspector') + client = module.client("inspector") try: existing_target_arn = client.list_assessment_targets( - filter={'assessmentTargetNamePattern': name}, - ).get('assessmentTargetArns')[0] + filter={"assessmentTargetNamePattern": name}, + ).get( + "assessmentTargetArns" + )[0] existing_target = camel_dict_to_snake_dict( client.describe_assessment_targets( assessmentTargetArns=[existing_target_arn], - ).get('assessmentTargets')[0] + ).get( + "assessmentTargets" + )[0] ) - existing_resource_group_arn = existing_target.get('resource_group_arn') - existing_resource_group_tags = client.describe_resource_groups( - resourceGroupArns=[existing_resource_group_arn], - ).get('resourceGroups')[0].get('tags') + existing_resource_group_arn = existing_target.get("resource_group_arn") + existing_resource_group_tags = ( + client.describe_resource_groups( + resourceGroupArns=[existing_resource_group_arn], + ) + .get("resourceGroups")[0] + .get("tags") + ) target_exists = True except ( @@ -163,23 +169,18 @@ def main(): except IndexError: target_exists = False - if state == 'present' and target_exists: + if state == "present" and target_exists: ansible_dict_tags = boto3_tag_list_to_ansible_dict(tags) - ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict( - existing_resource_group_tags - ) - tags_to_add, tags_to_remove = compare_aws_tags( - ansible_dict_tags, - ansible_dict_existing_tags - ) + ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict(existing_resource_group_tags) + tags_to_add, tags_to_remove = compare_aws_tags(ansible_dict_tags, ansible_dict_existing_tags) if not (tags_to_add or tags_to_remove): - existing_target.update({'tags': ansible_dict_existing_tags}) + existing_target.update({"tags": ansible_dict_existing_tags}) module.exit_json(changed=False, **existing_target) else: try: updated_resource_group_arn = client.create_resource_group( resourceGroupTags=tags, - ).get('resourceGroupArn') + ).get("resourceGroupArn") client.update_assessment_target( assessmentTargetArn=existing_target_arn, @@ -190,10 +191,12 @@ def main(): updated_target = camel_dict_to_snake_dict( client.describe_assessment_targets( assessmentTargetArns=[existing_target_arn], - ).get('assessmentTargets')[0] + ).get( + "assessmentTargets" + )[0] ) - updated_target.update({'tags': ansible_dict_tags}) + updated_target.update({"tags": ansible_dict_tags}) module.exit_json(changed=True, **updated_target) except ( botocore.exceptions.BotoCoreError, @@ -201,24 +204,26 @@ def main(): ) as e: module.fail_json_aws(e, msg="trying to update target") - elif state == 'present' and not target_exists: + elif state == "present" and not target_exists: try: new_resource_group_arn = client.create_resource_group( resourceGroupTags=tags, - ).get('resourceGroupArn') + ).get("resourceGroupArn") new_target_arn = client.create_assessment_target( assessmentTargetName=name, resourceGroupArn=new_resource_group_arn, - ).get('assessmentTargetArn') + ).get("assessmentTargetArn") new_target = camel_dict_to_snake_dict( client.describe_assessment_targets( assessmentTargetArns=[new_target_arn], - ).get('assessmentTargets')[0] + ).get( + "assessmentTargets" + )[0] ) - new_target.update({'tags': boto3_tag_list_to_ansible_dict(tags)}) + new_target.update({"tags": boto3_tag_list_to_ansible_dict(tags)}) module.exit_json(changed=True, **new_target) except ( botocore.exceptions.BotoCoreError, @@ -226,7 +231,7 @@ def 
main(): ) as e: module.fail_json_aws(e, msg="trying to create target") - elif state == 'absent' and target_exists: + elif state == "absent" and target_exists: try: client.delete_assessment_target( assessmentTargetArn=existing_target_arn, @@ -238,9 +243,9 @@ def main(): ) as e: module.fail_json_aws(e, msg="trying to delete target") - elif state == 'absent' and not target_exists: + elif state == "absent" and not target_exists: module.exit_json(changed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/kinesis_stream.py b/ansible_collections/community/aws/plugins/modules/kinesis_stream.py index e4c5d76df..d1ba65c86 100644 --- a/ansible_collections/community/aws/plugins/modules/kinesis_stream.py +++ b/ansible_collections/community/aws/plugins/modules/kinesis_stream.py @@ -1,22 +1,21 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: kinesis_stream version_added: 1.0.0 short_description: Manage a Kinesis Stream. description: - - Create or Delete a Kinesis Stream. - - Update the retention period of a Kinesis Stream. - - Update Tags on a Kinesis Stream. - - Enable/disable server side encryption on a Kinesis Stream. -author: Allen Sanabria (@linuxdynasty) + - Create or Delete a Kinesis Stream. + - Update the retention period of a Kinesis Stream. + - Update Tags on a Kinesis Stream. + - Enable/disable server side encryption on a Kinesis Stream. +author: + - Allen Sanabria (@linuxdynasty) options: name: description: @@ -73,13 +72,12 @@ options: - The GUID or alias for the KMS key. type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Basic creation example: @@ -148,9 +146,9 @@ EXAMPLES = ''' wait: true wait_timeout: 600 register: test_stream -''' +""" -RETURN = ''' +RETURN = r""" stream_name: description: The name of the Kinesis Stream. returned: when state == present. 
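The inspector_target hunks above and the kinesis_stream code below both lean on the relocated tagging helpers (ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, compare_aws_tags). A minimal sketch of the two list/dict conversions those helpers perform; these are illustrative re-implementations, not the collection's code:

def tags_dict_to_boto3_list(tags, key_name="key", value_name="value"):
    # {"Env": "dev"} -> [{"key": "Env", "value": "dev"}], the shape the AWS APIs expect
    return [{key_name: k, value_name: v} for k, v in tags.items()]


def boto3_tag_list_to_dict(tag_list, key_name="key", value_name="value"):
    # the inverse, used before diffing current tags against desired tags
    return {t[key_name]: t[value_name] for t in tag_list}


assert boto3_tag_list_to_dict(tags_dict_to_boto3_list({"Env": "dev"})) == {"Env": "dev"}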
@@ -179,7 +177,7 @@ tags: "Name": "Splunk", "Env": "development" } -''' +""" import time @@ -191,9 +189,10 @@ except ImportError: from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_tags(client, stream_name): @@ -210,16 +209,14 @@ def get_tags(client, stream_name): Returns: Tuple (bool, str, dict) """ - err_msg = '' + err_msg = "" success = False params = { - 'StreamName': stream_name, + "StreamName": stream_name, } results = dict() try: - results = ( - client.list_tags_for_stream(**params)['Tags'] - ) + results = client.list_tags_for_stream(**params)["Tags"] success = True except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -240,28 +237,26 @@ def find_stream(client, stream_name): Returns: Tuple (bool, str, dict) """ - err_msg = '' + err_msg = "" success = False params = { - 'StreamName': stream_name, + "StreamName": stream_name, } results = dict() has_more_shards = True shards = list() try: while has_more_shards: - results = ( - client.describe_stream(**params)['StreamDescription'] - ) - shards.extend(results.pop('Shards')) - has_more_shards = results['HasMoreShards'] + results = client.describe_stream(**params)["StreamDescription"] + shards.extend(results.pop("Shards")) + has_more_shards = results["HasMoreShards"] if has_more_shards: - params['ExclusiveStartShardId'] = shards[-1]['ShardId'] - results['Shards'] = shards - num_closed_shards = len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']]) - results['OpenShardsCount'] = len(shards) - num_closed_shards - results['ClosedShardsCount'] = num_closed_shards - results['ShardsCount'] = len(shards) + params["ExclusiveStartShardId"] = shards[-1]["ShardId"] + results["Shards"] = shards + num_closed_shards = len([s for s in shards if "EndingSequenceNumber" in s["SequenceNumberRange"]]) + results["OpenShardsCount"] = len(shards) - num_closed_shards + results["ClosedShardsCount"] = num_closed_shards + results["ShardsCount"] = len(shards) success = True except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -269,8 +264,7 @@ def find_stream(client, stream_name): return success, err_msg, results -def wait_for_status(client, stream_name, status, wait_timeout=300, - check_mode=False): +def wait_for_status(client, stream_name, status, wait_timeout=300, check_mode=False): """Wait for the status to change for a Kinesis Stream. 
Args: client (botocore.client.EC2): Boto3 client @@ -299,16 +293,14 @@ def wait_for_status(client, stream_name, status, wait_timeout=300, while wait_timeout > time.time(): try: - find_success, find_msg, stream = ( - find_stream(client, stream_name) - ) + find_success, find_msg, stream = find_stream(client, stream_name) if check_mode: status_achieved = True break - elif status != 'DELETING': + elif status != "DELETING": if find_success and stream: - if stream.get('StreamStatus') == status: + if stream.get("StreamStatus") == status: status_achieved = True break @@ -325,12 +317,12 @@ def wait_for_status(client, stream_name, status, wait_timeout=300, if not status_achieved: err_msg = "Wait time out reached, while waiting for results" else: - err_msg = "Status {0} achieved successfully".format(status) + err_msg = f"Status {status} achieved successfully" return status_achieved, err_msg, stream -def tags_action(client, stream_name, tags, action='create', check_mode=False): +def tags_action(client, stream_name, tags, action="create", check_mode=False): """Create or delete multiple tags from a Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -357,26 +349,26 @@ def tags_action(client, stream_name, tags, action='create', check_mode=False): """ success = False err_msg = "" - params = {'StreamName': stream_name} + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'create': - params['Tags'] = tags + if action == "create": + params["Tags"] = tags client.add_tags_to_stream(**params) success = True - elif action == 'delete': - params['TagKeys'] = tags + elif action == "delete": + params["TagKeys"] = tags client.remove_tags_from_stream(**params) success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = f"Invalid action {action}" else: - if action == 'create': + if action == "create": success = True - elif action == 'delete': + elif action == "delete": success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = f"Invalid action {action}" except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -406,32 +398,25 @@ def update_tags(client, stream_name, tags, check_mode=False): """ success = False changed = False - err_msg = '' - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + err_msg = "" + tag_success, tag_msg, current_tags = get_tags(client, stream_name) tags_to_set, tags_to_delete = compare_aws_tags( - current_tags, tags, + current_tags, + tags, purge_tags=True, ) if tags_to_delete: - delete_success, delete_msg = ( - tags_action( - client, stream_name, tags_to_delete, action='delete', - check_mode=check_mode - ) + delete_success, delete_msg = tags_action( + client, stream_name, tags_to_delete, action="delete", check_mode=check_mode ) if not delete_success: return delete_success, changed, delete_msg - tag_msg = 'Tags removed' + tag_msg = "Tags removed" if tags_to_set: - create_success, create_msg = ( - tags_action( - client, stream_name, tags_to_set, action='create', - check_mode=check_mode - ) + create_success, create_msg = tags_action( + client, stream_name, tags_to_set, action="create", check_mode=check_mode ) if create_success: changed = True @@ -440,8 +425,7 @@ def update_tags(client, stream_name, tags, check_mode=False): return success, changed, err_msg -def stream_action(client, stream_name, shard_count=1, action='create', - timeout=300, check_mode=False): +def stream_action(client, stream_name, shard_count=1, action="create", timeout=300, check_mode=False): """Create or 
Delete an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -465,28 +449,26 @@ def stream_action(client, stream_name, shard_count=1, action='create', List (bool, str) """ success = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'create': - params['ShardCount'] = shard_count + if action == "create": + params["ShardCount"] = shard_count client.create_stream(**params) success = True - elif action == 'delete': + elif action == "delete": client.delete_stream(**params) success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = f"Invalid action {action}" else: - if action == 'create': + if action == "create": success = True - elif action == 'delete': + elif action == "delete": success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = f"Invalid action {action}" except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -494,8 +476,9 @@ def stream_action(client, stream_name, shard_count=1, action='create', return success, err_msg -def stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='', key_id='', - timeout=300, check_mode=False): +def stream_encryption_action( + client, stream_name, action="start_encryption", encryption_type="", key_id="", timeout=300, check_mode=False +): """Create, Encrypt or Delete an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -521,31 +504,29 @@ def stream_encryption_action(client, stream_name, action='start_encryption', enc List (bool, str) """ success = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'start_encryption': - params['EncryptionType'] = encryption_type - params['KeyId'] = key_id + if action == "start_encryption": + params["EncryptionType"] = encryption_type + params["KeyId"] = key_id client.start_stream_encryption(**params) success = True - elif action == 'stop_encryption': - params['EncryptionType'] = encryption_type - params['KeyId'] = key_id + elif action == "stop_encryption": + params["EncryptionType"] = encryption_type + params["KeyId"] = key_id client.stop_stream_encryption(**params) success = True else: - err_msg = 'Invalid encryption action {0}'.format(action) + err_msg = f"Invalid encryption action {action}" else: - if action == 'start_encryption': + if action == "start_encryption": success = True - elif action == 'stop_encryption': + elif action == "stop_encryption": success = True else: - err_msg = 'Invalid encryption action {0}'.format(action) + err_msg = f"Invalid encryption action {action}" except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -553,8 +534,7 @@ def stream_encryption_action(client, stream_name, action='start_encryption', enc return success, err_msg -def retention_action(client, stream_name, retention_period=24, - action='increase', check_mode=False): +def retention_action(client, stream_name, retention_period=24, action="increase", check_mode=False): """Increase or Decrease the retention of messages in the Kinesis stream. Args: client (botocore.client.EC2): Boto3 client. 
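For orientation, the raw boto3 calls that stream_encryption_action dispatches between look like this; a sketch assuming an already-configured session and an existing stream named "example", not the module's own code path:

import boto3

client = boto3.client("kinesis")
params = {"StreamName": "example", "EncryptionType": "KMS", "KeyId": "alias/aws/kinesis"}

client.start_stream_encryption(**params)  # begin server-side encryption; applied asynchronously
client.stop_stream_encryption(**params)   # disable it again; same parameter shape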
@@ -579,35 +559,29 @@ def retention_action(client, stream_name, retention_period=24, Tuple (bool, str) """ success = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'increase': - params['RetentionPeriodHours'] = retention_period + if action == "increase": + params["RetentionPeriodHours"] = retention_period client.increase_stream_retention_period(**params) success = True - err_msg = ( - 'Retention Period increased successfully to {0}'.format(retention_period) - ) - elif action == 'decrease': - params['RetentionPeriodHours'] = retention_period + err_msg = f"Retention Period increased successfully to {retention_period}" + elif action == "decrease": + params["RetentionPeriodHours"] = retention_period client.decrease_stream_retention_period(**params) success = True - err_msg = ( - 'Retention Period decreased successfully to {0}'.format(retention_period) - ) + err_msg = f"Retention Period decreased successfully to {retention_period}" else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = f"Invalid action {action}" else: - if action == 'increase': + if action == "increase": success = True - elif action == 'decrease': + elif action == "decrease": success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = f"Invalid action {action}" except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -637,13 +611,10 @@ def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False Tuple (bool, str) """ success = True - err_msg = '' - params = { - 'StreamName': stream_name, - 'ScalingType': 'UNIFORM_SCALING' - } + err_msg = "" + params = {"StreamName": stream_name, "ScalingType": "UNIFORM_SCALING"} if not check_mode: - params['TargetShardCount'] = number_of_shards + params["TargetShardCount"] = number_of_shards try: client.update_shard_count(**params) except botocore.exceptions.ClientError as e: @@ -652,8 +623,17 @@ def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False return success, err_msg -def update(client, current_stream, stream_name, number_of_shards=1, retention_period=None, - tags=None, wait=False, wait_timeout=300, check_mode=False): +def update( + client, + current_stream, + stream_name, + number_of_shards=1, + retention_period=None, + tags=None, + wait=False, + wait_timeout=300, + check_mode=False, +): """Update an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. 
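retention_action, reformatted above, has to pick between two distinct API calls because Kinesis exposes no single "set retention" operation; the direction of the change selects the call. Condensed into a hypothetical helper:

def apply_retention_change(client, stream_name, current_hours, desired_hours):
    # Kinesis only offers increase/decrease calls, so compare first.
    if desired_hours > current_hours:
        client.increase_stream_retention_period(StreamName=stream_name, RetentionPeriodHours=desired_hours)
    elif desired_hours < current_hours:
        client.decrease_stream_retention_period(StreamName=stream_name, RetentionPeriodHours=desired_hours)
    # equal values fall through: the module reports "no change"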
@@ -693,44 +673,29 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe """ success = True changed = False - err_msg = '' + err_msg = "" if retention_period: if wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not wait_success: return wait_success, False, wait_msg - if current_stream.get('StreamStatus') == 'ACTIVE': + if current_stream.get("StreamStatus") == "ACTIVE": retention_changed = False - if retention_period > current_stream['RetentionPeriodHours']: - retention_changed, retention_msg = ( - retention_action( - client, stream_name, retention_period, action='increase', - check_mode=check_mode - ) + if retention_period > current_stream["RetentionPeriodHours"]: + retention_changed, retention_msg = retention_action( + client, stream_name, retention_period, action="increase", check_mode=check_mode ) - elif retention_period < current_stream['RetentionPeriodHours']: - retention_changed, retention_msg = ( - retention_action( - client, stream_name, retention_period, action='decrease', - check_mode=check_mode - ) + elif retention_period < current_stream["RetentionPeriodHours"]: + retention_changed, retention_msg = retention_action( + client, stream_name, retention_period, action="decrease", check_mode=check_mode ) - elif retention_period == current_stream['RetentionPeriodHours']: - retention_msg = ( - 'Retention {0} is the same as {1}' - .format( - retention_period, - current_stream['RetentionPeriodHours'] - ) - ) + elif retention_period == current_stream["RetentionPeriodHours"]: + retention_msg = f"Retention {retention_period} is the same as {current_stream['RetentionPeriodHours']}" success = True if retention_changed: @@ -739,36 +704,26 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe err_msg = retention_msg if changed and wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not wait_success: return wait_success, False, wait_msg elif changed and not wait: - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - if current_stream['StreamStatus'] != 'ACTIVE': - err_msg = ( - 'Retention Period for {0} is in the process of updating' - .format(stream_name) - ) + if current_stream["StreamStatus"] != "ACTIVE": + err_msg = f"Retention Period for {stream_name} is in the process of updating" return success, changed, err_msg else: err_msg = ( - 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}' - .format(current_stream.get('StreamStatus', 'UNKNOWN')) + "StreamStatus has to be ACTIVE in order to modify the retention period." 
+ f" Current status is {current_stream.get('StreamStatus', 'UNKNOWN')}" ) return success, changed, err_msg - if current_stream['OpenShardsCount'] != number_of_shards: - success, err_msg = ( - update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode) - ) + if current_stream["OpenShardsCount"] != number_of_shards: + success, err_msg = update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode) if not success: return success, changed, err_msg @@ -776,47 +731,42 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe changed = True if wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not wait_success: return wait_success, changed, wait_msg else: - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) - if stream_found and current_stream['StreamStatus'] != 'ACTIVE': - err_msg = ( - 'Number of shards for {0} is in the process of updating' - .format(stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) + if stream_found and current_stream["StreamStatus"] != "ACTIVE": + err_msg = f"Number of shards for {stream_name} is in the process of updating" return success, changed, err_msg if tags: - tag_success, tag_changed, err_msg = ( - update_tags(client, stream_name, tags, check_mode=check_mode) - ) + tag_success, tag_changed, err_msg = update_tags(client, stream_name, tags, check_mode=check_mode) changed |= tag_changed if wait: - success, err_msg, status_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + success, err_msg, status_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if success and changed: - err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name) + err_msg = f"Kinesis Stream {stream_name} updated successfully." elif success and not changed: - err_msg = 'Kinesis Stream {0} did not change.'.format(stream_name) + err_msg = f"Kinesis Stream {stream_name} did not change." return success, changed, err_msg -def create_stream(client, stream_name, number_of_shards=1, retention_period=None, - tags=None, wait=False, wait_timeout=300, check_mode=False): +def create_stream( + client, + stream_name, + number_of_shards=1, + retention_period=None, + tags=None, + wait=False, + wait_timeout=300, + check_mode=False, +): """Create an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. 
@@ -848,79 +798,59 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None """ success = False changed = False - err_msg = '' + err_msg = "" results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) - if stream_found and current_stream.get('StreamStatus') == 'DELETING' and wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + if stream_found and current_stream.get("StreamStatus") == "DELETING" and wait: + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) - if stream_found and current_stream.get('StreamStatus') != 'DELETING': + if stream_found and current_stream.get("StreamStatus") != "DELETING": success, changed, err_msg = update( - client, current_stream, stream_name, number_of_shards, - retention_period, tags, wait, wait_timeout, check_mode=check_mode + client, + current_stream, + stream_name, + number_of_shards, + retention_period, + tags, + wait, + wait_timeout, + check_mode=check_mode, ) else: - create_success, create_msg = ( - stream_action( - client, stream_name, number_of_shards, action='create', - check_mode=check_mode - ) + create_success, create_msg = stream_action( + client, stream_name, number_of_shards, action="create", check_mode=check_mode ) if not create_success: changed = True - err_msg = 'Failed to create Kinesis stream: {0}'.format(create_msg) + err_msg = f"Failed to create Kinesis stream: {create_msg}" return False, True, err_msg, {} else: changed = True if wait: - wait_success, wait_msg, results = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) - ) - err_msg = ( - 'Kinesis Stream {0} is in the process of being created' - .format(stream_name) + wait_success, wait_msg, results = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) + err_msg = f"Kinesis Stream {stream_name} is in the process of being created" if not wait_success: return wait_success, True, wait_msg, results else: - err_msg = ( - 'Kinesis Stream {0} created successfully' - .format(stream_name) - ) + err_msg = f"Kinesis Stream {stream_name} created successfully" if tags: - changed, err_msg = ( - tags_action( - client, stream_name, tags, action='create', - check_mode=check_mode - ) - ) + changed, err_msg = tags_action(client, stream_name, tags, action="create", check_mode=check_mode) if changed: success = True if not success: return success, changed, err_msg, results - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) - if retention_period and current_stream.get('StreamStatus') == 'ACTIVE': - changed, err_msg = ( - retention_action( - client, stream_name, retention_period, action='increase', - check_mode=check_mode - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) + if retention_period and current_stream.get("StreamStatus") == "ACTIVE": + changed, err_msg = retention_action( + client, stream_name, retention_period, action="increase", check_mode=check_mode ) if changed: success = True @@ -928,19 +858,15 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None return success, changed, err_msg, results else: err_msg = ( - 'StreamStatus has to be ACTIVE in order to modify the retention period. 
Current status is {0}' - .format(current_stream.get('StreamStatus', 'UNKNOWN')) + "StreamStatus has to be ACTIVE in order to modify the retention period." + f" Current status is {current_stream.get('StreamStatus', 'UNKNOWN')}" ) success = create_success changed = True if success: - stream_found, stream_msg, results = ( - find_stream(client, stream_name) - ) - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + stream_found, stream_msg, results = find_stream(client, stream_name) + tag_success, tag_msg, current_tags = get_tags(client, stream_name) if check_mode: current_tags = tags @@ -948,13 +874,12 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None current_tags = dict() results = camel_dict_to_snake_dict(results) - results['tags'] = current_tags + results["tags"] = current_tags return success, changed, err_msg, results -def delete_stream(client, stream_name, wait=False, wait_timeout=300, - check_mode=False): +def delete_stream(client, stream_name, wait=False, wait_timeout=300, check_mode=False): """Delete an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -978,44 +903,33 @@ def delete_stream(client, stream_name, wait=False, wait_timeout=300, """ success = False changed = False - err_msg = '' + err_msg = "" results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - success, err_msg = ( - stream_action( - client, stream_name, action='delete', check_mode=check_mode - ) - ) + success, err_msg = stream_action(client, stream_name, action="delete", check_mode=check_mode) if success: changed = True if wait: - success, err_msg, results = ( - wait_for_status( - client, stream_name, 'DELETING', wait_timeout, - check_mode=check_mode - ) + success, err_msg, results = wait_for_status( + client, stream_name, "DELETING", wait_timeout, check_mode=check_mode ) - err_msg = 'Stream {0} deleted successfully'.format(stream_name) + err_msg = f"Stream {stream_name} deleted successfully" if not success: return success, True, err_msg, results else: - err_msg = ( - 'Stream {0} is in the process of being deleted' - .format(stream_name) - ) + err_msg = f"Stream {stream_name} is in the process of being deleted" else: success = True changed = False - err_msg = 'Stream {0} does not exist'.format(stream_name) + err_msg = f"Stream {stream_name} does not exist" return success, changed, err_msg, results -def start_stream_encryption(client, stream_name, encryption_type='', key_id='', - wait=False, wait_timeout=300, check_mode=False): +def start_stream_encryption( + client, stream_name, encryption_type="", key_id="", wait=False, wait_timeout=300, check_mode=False +): """Start encryption on an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. 
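start_stream_encryption, whose body follows in the next hunk, is idempotent: it only calls the API when the stream's reported settings differ from the requested ones. That check reduces to a predicate like this (illustrative, not the module's literal code):

def encryption_matches(stream_description, encryption_type, key_id):
    # describe_stream already reports the active EncryptionType and KeyId
    return (
        stream_description.get("EncryptionType") == encryption_type
        and stream_description.get("KeyId") == key_id
    )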
@@ -1043,65 +957,56 @@ def start_stream_encryption(client, stream_name, encryption_type='', key_id='', """ success = False changed = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - if (current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id): + if current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id: changed = False success = True - err_msg = 'Kinesis Stream {0} encryption already configured.'.format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption already configured." else: - success, err_msg = ( - stream_encryption_action( - client, stream_name, action='start_encryption', encryption_type=encryption_type, key_id=key_id, check_mode=check_mode - ) + success, err_msg = stream_encryption_action( + client, + stream_name, + action="start_encryption", + encryption_type=encryption_type, + key_id=key_id, + check_mode=check_mode, ) if success: changed = True if wait: - success, err_msg, results = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + success, err_msg, results = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) - err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption started successfully." if not success: return success, True, err_msg, results else: - err_msg = ( - 'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name) - ) + err_msg = f"Kinesis Stream {stream_name} is in the process of starting encryption." else: success = True changed = False - err_msg = 'Kinesis Stream {0} does not exist'.format(stream_name) + err_msg = f"Kinesis Stream {stream_name} does not exist" if success: - stream_found, stream_msg, results = ( - find_stream(client, stream_name) - ) - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + stream_found, stream_msg, results = find_stream(client, stream_name) + tag_success, tag_msg, current_tags = get_tags(client, stream_name) if not current_tags: current_tags = dict() results = camel_dict_to_snake_dict(results) - results['tags'] = current_tags + results["tags"] = current_tags return success, changed, err_msg, results -def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', - wait=True, wait_timeout=300, check_mode=False): +def stop_stream_encryption( + client, stream_name, encryption_type="", key_id="", wait=True, wait_timeout=300, check_mode=False +): """Stop encryption on an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. 
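Both encryption paths re-read the stream through find_stream, which pages shards by hand via HasMoreShards and ExclusiveStartShardId. That loop in isolation, as a sketch assuming a boto3 kinesis client:

def list_all_shards(client, stream_name):
    shards, params = [], {"StreamName": stream_name}
    while True:
        description = client.describe_stream(**params)["StreamDescription"]
        shards.extend(description["Shards"])
        if not description["HasMoreShards"]:
            return shards
        # resume the listing after the last shard already collected
        params["ExclusiveStartShardId"] = shards[-1]["ShardId"]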
@@ -1127,57 +1032,47 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', """ success = False changed = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - if current_stream.get('EncryptionType') == 'KMS': - success, err_msg = ( - stream_encryption_action( - client, stream_name, action='stop_encryption', key_id=key_id, encryption_type=encryption_type, check_mode=check_mode - ) + if current_stream.get("EncryptionType") == "KMS": + success, err_msg = stream_encryption_action( + client, + stream_name, + action="stop_encryption", + key_id=key_id, + encryption_type=encryption_type, + check_mode=check_mode, ) changed = success if wait: - success, err_msg, results = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + success, err_msg, results = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not success: return success, True, err_msg, results - err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption stopped successfully." else: - err_msg = ( - 'Stream {0} is in the process of stopping encryption.'.format(stream_name) - ) - elif current_stream.get('EncryptionType') == 'NONE': + err_msg = f"Stream {stream_name} is in the process of stopping encryption." + elif current_stream.get("EncryptionType") == "NONE": success = True - err_msg = 'Kinesis Stream {0} encryption already stopped.'.format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption already stopped." else: success = True changed = False - err_msg = 'Stream {0} does not exist.'.format(stream_name) + err_msg = f"Stream {stream_name} does not exist." 
if success: - stream_found, stream_msg, results = ( - find_stream(client, stream_name) - ) - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + stream_found, stream_msg, results = find_stream(client, stream_name) + tag_success, tag_msg, current_tags = get_tags(client, stream_name) if not current_tags: current_tags = dict() results = camel_dict_to_snake_dict(results) - results['tags'] = current_tags + results["tags"] = current_tags return success, changed, err_msg, results @@ -1185,78 +1080,65 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', def main(): argument_spec = dict( name=dict(required=True), - shards=dict(default=None, required=False, type='int'), - retention_period=dict(default=None, required=False, type='int'), - tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), - wait=dict(default=True, required=False, type='bool'), - wait_timeout=dict(default=300, required=False, type='int'), - state=dict(default='present', choices=['present', 'absent']), - encryption_type=dict(required=False, choices=['NONE', 'KMS']), - key_id=dict(required=False, type='str'), - encryption_state=dict(required=False, choices=['enabled', 'disabled']), + shards=dict(default=None, required=False, type="int"), + retention_period=dict(default=None, required=False, type="int"), + tags=dict(default=None, required=False, type="dict", aliases=["resource_tags"]), + wait=dict(default=True, required=False, type="bool"), + wait_timeout=dict(default=300, required=False, type="int"), + state=dict(default="present", choices=["present", "absent"]), + encryption_type=dict(required=False, choices=["NONE", "KMS"]), + key_id=dict(required=False, type="str"), + encryption_state=dict(required=False, choices=["enabled", "disabled"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, ) - retention_period = module.params.get('retention_period') - stream_name = module.params.get('name') - shards = module.params.get('shards') - state = module.params.get('state') - tags = module.params.get('tags') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - encryption_type = module.params.get('encryption_type') - key_id = module.params.get('key_id') - encryption_state = module.params.get('encryption_state') + retention_period = module.params.get("retention_period") + stream_name = module.params.get("name") + shards = module.params.get("shards") + state = module.params.get("state") + tags = module.params.get("tags") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + encryption_type = module.params.get("encryption_type") + key_id = module.params.get("key_id") + encryption_state = module.params.get("encryption_state") - if state == 'present' and not shards: - module.fail_json(msg='Shards is required when state == present.') + if state == "present" and not shards: + module.fail_json(msg="Shards is required when state == present.") if retention_period: if retention_period < 24: - module.fail_json(msg='Retention period can not be less than 24 hours.') + module.fail_json(msg="Retention period can not be less than 24 hours.") check_mode = module.check_mode try: - client = module.client('kinesis') + client = module.client("kinesis") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if state == 'present': - success, 
changed, err_msg, results = ( - create_stream( - client, stream_name, shards, retention_period, tags, - wait, wait_timeout, check_mode - ) + if state == "present": + success, changed, err_msg, results = create_stream( + client, stream_name, shards, retention_period, tags, wait, wait_timeout, check_mode ) - if encryption_state == 'enabled': - success, changed, err_msg, results = ( - start_stream_encryption( - client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode - ) + if encryption_state == "enabled": + success, changed, err_msg, results = start_stream_encryption( + client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode ) - elif encryption_state == 'disabled': - success, changed, err_msg, results = ( - stop_stream_encryption( - client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode - ) + elif encryption_state == "disabled": + success, changed, err_msg, results = stop_stream_encryption( + client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode ) - elif state == 'absent': - success, changed, err_msg, results = ( - delete_stream(client, stream_name, wait, wait_timeout, check_mode) - ) + elif state == "absent": + success, changed, err_msg, results = delete_stream(client, stream_name, wait, wait_timeout, check_mode) if success: - module.exit_json( - success=success, changed=changed, msg=err_msg, **results - ) + module.exit_json(success=success, changed=changed, msg=err_msg, **results) else: - module.fail_json( - success=success, changed=changed, msg=err_msg, result=results - ) + module.fail_json(success=success, changed=changed, msg=err_msg, result=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/lightsail.py b/ansible_collections/community/aws/plugins/modules/lightsail.py index 5e4035154..16b4338e7 100644 --- a/ansible_collections/community/aws/plugins/modules/lightsail.py +++ b/ansible_collections/community/aws/plugins/modules/lightsail.py @@ -1,23 +1,20 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lightsail version_added: 1.0.0 short_description: Manage instances in AWS Lightsail description: - - Manage instances in AWS Lightsail. - - Instance tagging is not yet supported in this module. + - Manage instances in AWS Lightsail. + - Instance tagging is not yet supported in this module. author: - - "Nick Ball (@nickball)" - - "Prasad Katti (@prasadkatti)" + - "Nick Ball (@nickball)" + - "Prasad Katti (@prasadkatti)" options: state: description: @@ -50,6 +47,38 @@ options: - Launch script that can configure the instance with additional data. type: str default: '' + public_ports: + description: + - A list of dictionaries to describe the ports to open for the specified instance. + type: list + elements: dict + suboptions: + from_port: + description: The first port in a range of open ports on the instance. + type: int + required: true + to_port: + description: The last port in a range of open ports on the instance. + type: int + required: true + protocol: + description: The IP protocol name accepted for the defined range of open ports. 
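The public_ports suboptions are snake_case spellings of the Lightsail portInfos fields; update_public_ports (later in this diff) converts them with snake_dict_to_camel_dict before calling put_instance_public_ports. A small sketch of that conversion:

    # Sketch of the snake_case -> camelCase conversion applied to public_ports.
    # snake_dict_to_camel_dict recurses into lists, so the whole parameter
    # list can be converted in one call.
    from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

    ports = [{"from_port": 22, "to_port": 22, "protocol": "tcp", "cidrs": ["0.0.0.0/0"]}]
    print(snake_dict_to_camel_dict(ports))
    # [{'fromPort': 22, 'toPort': 22, 'protocol': 'tcp', 'cidrs': ['0.0.0.0/0']}]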
+ type: str + choices: ['tcp', 'all', 'udp', 'icmp'] + required: true + cidrs: + description: + - The IPv4 address, or range of IPv4 addresses (in CIDR notation) that are allowed to connect to the instance through the ports, and the protocol. + - One of I(cidrs) or I(ipv6_cidrs) must be specified. + type: list + elements: str + ipv6_cidrs: + description: + - The IPv6 address, or range of IPv6 addresses (in CIDR notation) that are allowed to connect to the instance through the ports, and the protocol. + - One of I(cidrs) or I(ipv6_cidrs) must be specified. + type: list + elements: str + version_added: 6.0.0 key_pair_name: description: - Name of the key pair to use with the instance. @@ -69,14 +98,13 @@ options: type: int extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new Lightsail instance community.aws.lightsail: state: present @@ -87,6 +115,12 @@ EXAMPLES = ''' bundle_id: nano_1_0 key_pair_name: id_rsa user_data: " echo 'hello world' > /home/ubuntu/test.txt" + public_ports: + - from_port: 22 + to_port: 22 + protocol: "tcp" + cidrs: ["0.0.0.0/0"] + ipv6_cidrs: ["::/0"] register: my_instance - name: Delete an instance @@ -94,10 +128,9 @@ EXAMPLES = ''' state: absent region: us-east-1 name: my_instance +""" -''' - -RETURN = ''' +RETURN = r""" changed: description: if a snapshot has been modified/created returned: always @@ -149,7 +182,7 @@ instance: name: running support_code: "123456789012/i-0997c97831ee21e33" username: "ubuntu" -''' +""" import time @@ -160,22 +193,23 @@ except ImportError: pass from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -def find_instance_info(module, client, instance_name, fail_if_not_found=False): +def find_instance_info(module, client, instance_name, fail_if_not_found=False): try: res = client.get_instance(instanceName=instance_name) - except is_boto3_error_code('NotFoundException') as e: + except is_boto3_error_code("NotFoundException") as e: if fail_if_not_found: module.fail_json_aws(e) return None except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e) - return res['instance'] + return res["instance"] def wait_for_instance_state(module, client, instance_name, states): @@ -183,53 +217,69 @@ def wait_for_instance_state(module, client, instance_name, states): `states` is a list of instance states that we are waiting for. 
""" - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") wait_max = time.time() + wait_timeout while wait_max > time.time(): try: instance = find_instance_info(module, client, instance_name) - if instance['state']['name'] in states: + if instance["state"]["name"] in states: break time.sleep(5) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) else: - module.fail_json(msg='Timed out waiting for instance "{0}" to get to one of the following states -' - ' {1}'.format(instance_name, states)) + module.fail_json( + msg=f'Timed out waiting for instance "{instance_name}" to get to one of the following states - {states}' + ) -def create_instance(module, client, instance_name): +def update_public_ports(module, client, instance_name): + try: + client.put_instance_public_ports( + portInfos=snake_dict_to_camel_dict(module.params.get("public_ports")), + instanceName=instance_name, + ) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + +def create_or_update_instance(module, client, instance_name): inst = find_instance_info(module, client, instance_name) - if inst: - module.exit_json(changed=False, instance=camel_dict_to_snake_dict(inst)) - else: - create_params = {'instanceNames': [instance_name], - 'availabilityZone': module.params.get('zone'), - 'blueprintId': module.params.get('blueprint_id'), - 'bundleId': module.params.get('bundle_id'), - 'userData': module.params.get('user_data')} - key_pair_name = module.params.get('key_pair_name') + if not inst: + create_params = { + "instanceNames": [instance_name], + "availabilityZone": module.params.get("zone"), + "blueprintId": module.params.get("blueprint_id"), + "bundleId": module.params.get("bundle_id"), + "userData": module.params.get("user_data"), + } + + key_pair_name = module.params.get("key_pair_name") if key_pair_name: - create_params['keyPairName'] = key_pair_name + create_params["keyPairName"] = key_pair_name try: client.create_instances(**create_params) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - wait = module.params.get('wait') + wait = module.params.get("wait") if wait: - desired_states = ['running'] + desired_states = ["running"] wait_for_instance_state(module, client, instance_name, desired_states) - inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) - module.exit_json(changed=True, instance=camel_dict_to_snake_dict(inst)) + if module.params.get("public_ports") is not None: + update_public_ports(module, client, instance_name) + after_update_inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) + module.exit_json( + changed=after_update_inst != inst, + instance=camel_dict_to_snake_dict(after_update_inst), + ) -def delete_instance(module, client, instance_name): +def delete_instance(module, client, instance_name): changed = False inst = find_instance_info(module, client, instance_name) @@ -237,7 +287,7 @@ def delete_instance(module, client, instance_name): module.exit_json(changed=changed, instance={}) # Wait for instance to exit transition state before deleting - desired_states = ['running', 'stopped'] + desired_states = ["running", "stopped"] wait_for_instance_state(module, client, instance_name, desired_states) try: @@ -278,13 +328,13 @@ def start_or_stop_instance(module, client, instance_name, state): inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) # Wait for instance to exit transition state before state change - desired_states = 
['running', 'stopped'] + desired_states = ["running", "stopped"] wait_for_instance_state(module, client, instance_name, desired_states) # Try state change - if inst and inst['state']['name'] != state: + if inst and inst["state"]["name"] != state: try: - if state == 'running': + if state == "running": client.start_instance(instanceName=instance_name) else: client.stop_instance(instanceName=instance_name) @@ -294,7 +344,7 @@ def start_or_stop_instance(module, client, instance_name, state): # Grab current instance info inst = find_instance_info(module, client, instance_name) - wait = module.params.get('wait') + wait = module.params.get("wait") if wait: desired_states = [state] wait_for_instance_state(module, client, instance_name, desired_states) @@ -304,37 +354,50 @@ def start_or_stop_instance(module, client, instance_name, state): def main(): - argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted', - 'rebooted']), - zone=dict(type='str'), - blueprint_id=dict(type='str'), - bundle_id=dict(type='str'), - key_pair_name=dict(type='str'), - user_data=dict(type='str', default=''), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=300, type='int'), + name=dict(type="str", required=True), + state=dict( + type="str", default="present", choices=["present", "absent", "stopped", "running", "restarted", "rebooted"] + ), + zone=dict(type="str"), + blueprint_id=dict(type="str"), + bundle_id=dict(type="str"), + key_pair_name=dict(type="str"), + user_data=dict(type="str", default=""), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=300, type="int"), + public_ports=dict( + type="list", + elements="dict", + options=dict( + from_port=dict(type="int", required=True), + to_port=dict(type="int", required=True), + protocol=dict(type="str", choices=["tcp", "all", "udp", "icmp"], required=True), + cidrs=dict(type="list", elements="str"), + ipv6_cidrs=dict(type="list", elements="str"), + ), + required_one_of=[("cidrs", "ipv6_cidrs")], + ), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ('zone', 'blueprint_id', 'bundle_id')]]) + module = AnsibleAWSModule( + argument_spec=argument_spec, required_if=[["state", "present", ("zone", "blueprint_id", "bundle_id")]] + ) - client = module.client('lightsail') + client = module.client("lightsail") - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") - if state == 'present': - create_instance(module, client, name) - elif state == 'absent': + if state == "present": + create_or_update_instance(module, client, name) + elif state == "absent": delete_instance(module, client, name) - elif state in ('running', 'stopped'): + elif state in ("running", "stopped"): start_or_stop_instance(module, client, name, state) - elif state in ('restarted', 'rebooted'): + elif state in ("restarted", "rebooted"): restart_instance(module, client, name) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/lightsail_snapshot.py b/ansible_collections/community/aws/plugins/modules/lightsail_snapshot.py new file mode 100644 index 000000000..1d0d178aa --- /dev/null +++ b/ansible_collections/community/aws/plugins/modules/lightsail_snapshot.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible 
Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: lightsail_snapshot +version_added: "6.0.0" +short_description: Creates snapshots of AWS Lightsail instances +description: + - Creates snapshots of AWS Lightsail instances. +author: + - "Nuno Saavedra (@Nfsaavedra)" +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + type: str + snapshot_name: + description: Name of the new instance snapshot. + required: true + type: str + instance_name: + description: + - Name of the instance to create the snapshot. + - Required when I(state=present). + type: str + wait: + description: + - Wait for the instance snapshot to be created before returning. + type: bool + default: true + wait_timeout: + description: + - How long before I(wait) gives up, in seconds. + default: 300 + type: int + +extends_documentation_fragment: +- amazon.aws.common.modules +- amazon.aws.region.modules +- amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Create AWS Lightsail snapshot + lightsail_snapshot: + region: us-east-1 + snapshot_name: "my_instance_snapshot" + instance_name: "my_instance" + +- name: Delete AWS Lightsail snapshot + lightsail_snapshot: + region: us-east-1 + snapshot_name: "my_instance_snapshot" + state: absent +""" + +RETURN = r""" +changed: + description: if a snapshot has been modified/created + returned: always + type: bool + sample: + changed: true +snapshot: + description: instance snapshot data + type: dict + returned: always + sample: + arn: "arn:aws:lightsail:us-east-1:070807442430:InstanceSnapshot/54b0f785-7132-443d-9e32-95a6825636a4" + created_at: "2023-02-23T18:46:11.183000+00:00" + from_attached_disks: [] + from_blueprint_id: "amazon_linux_2" + from_bundle_id: "nano_2_0" + from_instance_arn: "arn:aws:lightsail:us-east-1:070807442430:Instance/5ca1e7ca-a994-4e19-bb82-deb9d79e9ca3" + from_instance_name: "my_instance" + is_from_auto_snapshot: false + location: + availability_zone: "all" + region_name: "us-east-1" + name: "my_instance_snapshot" + resource_type: "InstanceSnapshot" + size_in_gb: 20 + state: "available" + support_code: "351201681302/ami-06b48e5589f1e248b" + tags: [] +""" + +import time + +try: + import botocore +except ImportError: + # will be caught by AnsibleAWSModule + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + +def find_instance_snapshot_info(module, client, instance_snapshot_name, fail_if_not_found=False): + try: + res = client.get_instance_snapshot(instanceSnapshotName=instance_snapshot_name) + except is_boto3_error_code("NotFoundException") as e: + if fail_if_not_found: + module.fail_json_aws(e) + return None + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + return res["instanceSnapshot"] + + +def wait_for_instance_snapshot(module, client, instance_snapshot_name): + wait_timeout = module.params.get("wait_timeout") + wait_max = time.time() + wait_timeout + snapshot = find_instance_snapshot_info(module, client, instance_snapshot_name) + + while wait_max > time.time(): + snapshot = find_instance_snapshot_info(module, client, instance_snapshot_name) + current_state = snapshot["state"] + if 
current_state != "pending": + break + time.sleep(5) + else: + module.fail_json(msg=f'Timed out waiting for instance snapshot "{instance_snapshot_name}" to be created.') + + return snapshot + + +def create_snapshot(module, client): + snapshot = find_instance_snapshot_info(module, client, module.params.get("snapshot_name")) + new_instance = snapshot is None + + if module.check_mode or not new_instance: + snapshot = snapshot if snapshot is not None else {} + module.exit_json( + changed=new_instance, + instance_snapshot=camel_dict_to_snake_dict(snapshot), + ) + + try: + snapshot = client.create_instance_snapshot( + instanceSnapshotName=module.params.get("snapshot_name"), + instanceName=module.params.get("instance_name"), + ) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + if module.params.get("wait"): + snapshot = wait_for_instance_snapshot(module, client, module.params.get("snapshot_name")) + + module.exit_json( + changed=new_instance, + instance_snapshot=camel_dict_to_snake_dict(snapshot), + ) + + +def delete_snapshot(module, client): + snapshot = find_instance_snapshot_info(module, client, module.params.get("snapshot_name")) + if module.check_mode or snapshot is None: + changed = not (snapshot is None) + instance = snapshot if changed else {} + module.exit_json(changed=changed, instance=instance) + + try: + client.delete_instance_snapshot(instanceSnapshotName=module.params.get("snapshot_name")) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(changed=True, instance=camel_dict_to_snake_dict(snapshot)) + + +def main(): + argument_spec = dict( + state=dict(type="str", default="present", choices=["present", "absent"]), + snapshot_name=dict(type="str", required=True), + instance_name=dict(type="str"), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=300, type="int"), + ) + required_if = [ + ["state", "present", ("instance_name",)], + ] + + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) + client = module.client("lightsail") + + state = module.params.get("state") + + if state == "present": + create_snapshot(module, client) + elif state == "absent": + delete_snapshot(module, client) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py b/ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py index 799ff629d..40d10a86b 100644 --- a/ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py +++ b/ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py @@ -1,14 +1,10 @@ #!/usr/bin/python - # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lightsail_static_ip version_added: 4.1.0 @@ -29,13 +25,13 @@ options: required: true type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Provision a Lightsail static IP community.aws.lightsail_static_ip: state: present @@ -46,9 +42,9 @@ EXAMPLES = ''' community.aws.lightsail_static_ip: state: absent name: my_static_ip -''' +""" -RETURN = ''' +RETURN = r""" static_ip: description: static_ipinstance data 
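Both lightsail wait helpers (wait_for_instance_state above and wait_for_instance_snapshot in this hunk) lean on Python's while/else: the else clause runs only when the deadline expires without a break. The idiom in isolation, with assumed names:

    # Distilled polling loop from the lightsail wait helpers: break on
    # success, fall through to the while-else on timeout.
    import time

    def wait_until(predicate, timeout=300, interval=5):
        deadline = time.time() + timeout
        while deadline > time.time():
            if predicate():
                break
            time.sleep(interval)
        else:
            raise TimeoutError("timed out waiting for condition")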
returned: always @@ -64,7 +60,7 @@ static_ip: name: "static_ip" resource_type: StaticIp support_code: "123456789012/192.0.2.5" -''' +""" try: import botocore @@ -74,30 +70,29 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -def find_static_ip_info(module, client, static_ip_name, fail_if_not_found=False): +def find_static_ip_info(module, client, static_ip_name, fail_if_not_found=False): try: res = client.get_static_ip(staticIpName=static_ip_name) - except is_boto3_error_code('NotFoundException') as e: + except is_boto3_error_code("NotFoundException") as e: if fail_if_not_found: module.fail_json_aws(e) return None except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e) - return res['staticIp'] + return res["staticIp"] def create_static_ip(module, client, static_ip_name): - inst = find_static_ip_info(module, client, static_ip_name) if inst: module.exit_json(changed=False, static_ip=camel_dict_to_snake_dict(inst)) else: - create_params = {'staticIpName': static_ip_name} + create_params = {"staticIpName": static_ip_name} try: client.allocate_static_ip(**create_params) @@ -110,7 +105,6 @@ def create_static_ip(module, client, static_ip_name): def delete_static_ip(module, client, static_ip_name): - inst = find_static_ip_info(module, client, static_ip_name) if inst is None: module.exit_json(changed=False, static_ip={}) @@ -126,24 +120,23 @@ def delete_static_ip(module, client, static_ip_name): def main(): - argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule(argument_spec=argument_spec) - client = module.client('lightsail') + client = module.client("lightsail") - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") - if state == 'present': + if state == "present": create_static_ip(module, client, name) - elif state == 'absent': + elif state == "absent": delete_static_ip(module, client, name) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/mq_broker.py b/ansible_collections/community/aws/plugins/modules/mq_broker.py new file mode 100644 index 000000000..5a97fda92 --- /dev/null +++ b/ansible_collections/community/aws/plugins/modules/mq_broker.py @@ -0,0 +1,628 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: mq_broker +version_added: 6.0.0 +short_description: MQ broker management +description: + - Create/update/delete a broker. + - Reboot a broker. +author: + - FCO (@fotto) +options: + broker_name: + description: + - The Name of the MQ broker to work on. 
+ type: str + required: true + state: + description: + - "C(present): Create/update broker." + - "C(absent): Delete broker." + - "C(restarted): Reboot broker." + choices: [ 'present', 'absent', 'restarted' ] + default: present + type: str + deployment_mode: + description: + - Set broker deployment type. + - Can be used only during creation. + - Defaults to C(SINGLE_INSTANCE). + choices: [ 'SINGLE_INSTANCE', 'ACTIVE_STANDBY_MULTI_AZ', 'CLUSTER_MULTI_AZ' ] + type: str + use_aws_owned_key: + description: + - Must be set to C(false) if I(kms_key_id) is provided as well. + - Can be used only during creation. + - Defaults to C(true). + type: bool + kms_key_id: + description: + - Use referenced key to encrypt broker data at rest. + - Can be used only during creation. + type: str + engine_type: + description: + - Set broker engine type. + - Can be used only during creation. + - Defaults to C(ACTIVEMQ). + choices: [ 'ACTIVEMQ', 'RABBITMQ' ] + type: str + maintenance_window_start_time: + description: + - Set maintenance window for automatic minor upgrades. + - Can be used only during creation. + - Not providing any value means "no maintenance window". + type: dict + publicly_accessible: + description: + - Allow/disallow public access. + - Can be used only during creation. + - Defaults to C(false). + type: bool + storage_type: + description: + - Set underlying storage type. + - Can be used only during creation. + - Defaults to C(EFS). + choices: [ 'EBS', 'EFS' ] + type: str + subnet_ids: + description: + - Defines where to deploy broker instances. + - Minimum required number depends on deployment type. + - Can be used only during creation. + type: list + elements: str + users: + description: + - This parameter allows using a custom set of initial user(s). + - M(community.aws.mq_user) is the preferred way to manage (local) users, + however a broker cannot be created without any user. + - If nothing is specified a default C(admin) user will be created along with brokers. + - Can be used only during creation. Use M(community.aws.mq_user) module for updates. + type: list + elements: dict + tags: + description: + - Tag newly created brokers. + - Can be used only during creation. + type: dict + authentication_strategy: + description: Choose between locally and remotely managed users. + choices: [ 'SIMPLE', 'LDAP' ] + type: str + auto_minor_version_upgrade: + description: Allow/disallow automatic minor version upgrades. + type: bool + default: true + engine_version: + description: + - Set engine version of broker. + - The special value C(latest) will pick the latest available version. + - The special value C(latest) is ignored on update. + type: str + host_instance_type: + description: Instance type of broker instances. + type: str + enable_audit_log: + description: Enable/disable pushing audit logs to AWS CloudWatch. + type: bool + default: false + enable_general_log: + description: Enable/disable pushing general logs to AWS CloudWatch. + type: bool + default: false + security_groups: + description: + - Associate security groups with broker. + - At least one must be provided during creation. + type: list + elements: str + wait: + description: + - Specifies whether the module waits for the desired C(state). + - The time to wait can be controlled by setting I(wait_timeout). + type: bool + default: false + version_added: 7.1.0 + wait_timeout: + description: + - How long to wait (in seconds) for the broker to reach the desired state if I(wait=true).
+ default: 900 + type: int + version_added: 7.1.0 + +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules +""" + + +EXAMPLES = r""" +- name: create broker (if missing) with minimal required parameters + community.aws.mq_broker: + broker_name: "{{ broker_name }}" + security_groups: + - sg_xxxxxxx + subnet_ids: + - subnet_xxx + - subnet_yyy + register: result + +- set_fact: + broker_id: "{{ result.broker['BrokerId'] }}" + +- name: use mq_broker_info to wait until broker is ready + community.aws.mq_broker_info: + broker_id: "{{ broker_id }}" + register: result + until: "result.broker['BrokerState'] == 'RUNNING'" + retries: 15 + delay: 60 + +- name: create or update broker with almost all parameters set including credentials + community.aws.mq_broker: + broker_name: "my_broker_2" + state: present + deployment_mode: 'ACTIVE_STANDBY_MULTI_AZ' + use_aws_owned_key: false + kms_key_id: 'my-precreated-key-id' + engine_type: 'ACTIVEMQ' + maintenance_window_start_time: + DayOfWeek: 'MONDAY' + TimeOfDay: '03:15' + TimeZone: 'Europe/Berlin' + publicly_accessible: true + storage_type: 'EFS' + security_groups: + - sg_xxxxxxx + subnet_ids: + - subnet_xxx + - subnet_yyy + users: + - Username: 'initial-user' + Password: 'plain-text-password' + ConsoleAccess: true + tags: + env: Test + creator: ansible + authentication_strategy: 'SIMPLE' + auto_minor_version_upgrade: true + engine_version: "5.15.13" + host_instance_type: 'mq.t3.micro' + enable_audit_log: true + enable_general_log: true + +- name: reboot a broker + community.aws.mq_broker: + broker_name: "my_broker_2" + state: restarted + +- name: delete a broker + community.aws.mq_broker: + broker_name: "my_broker_2" + state: absent +""" + +RETURN = r""" +broker: + description: + - "All API responses are converted to snake case except 'Tags'" + - "'state=present': API response of create_broker() or update_broker() call" + - "'state=absent': result of describe_broker() call before delete_broker() is triggered" + - "'state=restarted': result of describe_broker() after reboot has been triggered" + type: dict + returned: success +""" + +try: + import botocore +except ImportError: + # handled by AnsibleAWSModule + pass + +from time import sleep +from time import time + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + +PARAMS_MAP = { + "authentication_strategy": "AuthenticationStrategy", + "auto_minor_version_upgrade": "AutoMinorVersionUpgrade", + "broker_name": "BrokerName", + "deployment_mode": "DeploymentMode", + "use_aws_owned_key": "EncryptionOptions/UseAwsOwnedKey", + "kms_key_id": "EncryptionOptions/KmsKeyId", + "engine_type": "EngineType", + "engine_version": "EngineVersion", + "host_instance_type": "HostInstanceType", + "enable_audit_log": "Logs/Audit", + "enable_general_log": "Logs/General", + "maintenance_window_start_time": "MaintenanceWindowStartTime", + "publicly_accessible": "PubliclyAccessible", + "security_groups": "SecurityGroups", + "storage_type": "StorageType", + "subnet_ids": "SubnetIds", + "users": "Users", + "tags": "Tags", +} + + +DEFAULTS = { + "authentication_strategy": "SIMPLE", + "auto_minor_version_upgrade": False, + "deployment_mode": "SINGLE_INSTANCE", + "use_aws_owned_key": True, + "engine_type": "ACTIVEMQ", + "engine_version": "latest", + "host_instance_type": "mq.t3.micro", + "enable_audit_log": False, + "enable_general_log": False,
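Several PARAMS_MAP values above contain a slash; _set_kwarg (just below) expands such paths into nested request dictionaries. A condensed, hypothetical re-implementation of that expansion:

    # Sketch of the slash-path expansion performed by _set_kwarg:
    # "EncryptionOptions/UseAwsOwnedKey" becomes a nested dict key.
    def set_path(kwargs, path, value):
        *parents, leaf = path.split("/")
        node = kwargs
        for part in parents:
            node = node.setdefault(part, {})
        node[leaf] = value

    kwargs = {}
    set_path(kwargs, "EncryptionOptions/UseAwsOwnedKey", False)
    print(kwargs)  # {'EncryptionOptions': {'UseAwsOwnedKey': False}}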
"publicly_accessible": False, + "storage_type": "EFS", +} + +CREATE_ONLY_PARAMS = [ + "deployment_mode", + "use_aws_owned_key", + "kms_key_id", + "engine_type", + "maintenance_window_start_time", + "publicly_accessible", + "storage_type", + "subnet_ids", + "users", + "tags", +] + + +def _set_kwarg(kwargs, key, value): + mapped_key = PARAMS_MAP[key] + if "/" in mapped_key: + key_list = mapped_key.split("/") + key_list.reverse() + else: + key_list = [mapped_key] + data = kwargs + while len(key_list) > 1: + this_key = key_list.pop() + if this_key not in data: + data[this_key] = {} + # + data = data[this_key] + data[key_list[0]] = value + + +def _fill_kwargs(module, apply_defaults=True, ignore_create_params=False): + kwargs = {} + if apply_defaults: + for p_name, p_value in DEFAULTS.items(): + _set_kwarg(kwargs, p_name, p_value) + for p_name in module.params: + if ignore_create_params and p_name in CREATE_ONLY_PARAMS: + # silently ignore CREATE_ONLY_PARAMS on update to + # make playbooks idempotent + continue + if p_name in PARAMS_MAP and module.params[p_name] is not None: + _set_kwarg(kwargs, p_name, module.params[p_name]) + else: + # ignore + pass + return kwargs + + +def __list_needs_change(current, desired): + if len(current) != len(desired): + return True + # equal length: + c_sorted = sorted(current) + d_sorted = sorted(desired) + for index, value in enumerate(current): + if value != desired[index]: + return True + # + return False + + +def __dict_needs_change(current, desired): + # values contained in 'current' but not specified in 'desired' are ignored + # value contained in 'desired' but not in 'current' (unsupported attributes) are ignored + for key in desired: + if key in current: + if desired[key] != current[key]: + return True + # + return False + + +def _needs_change(current, desired): + needs_change = False + for key in desired: + current_value = current[key] + desired_value = desired[key] + if isinstance(current_value, (int, str, bool)): + if current_value != desired_value: + needs_change = True + break + elif isinstance(current_value, list): + # assumption: all 'list' type settings we allow changes for have scalar values + if __list_needs_change(current_value, desired_value): + needs_change = True + break + elif isinstance(current_value, dict): + # assumption: all 'dict' type settings we allow changes for have scalar values + if __dict_needs_change(current_value, desired_value): + needs_change = True + break + else: + # unexpected type + needs_change = True + break + # + return needs_change + + +def get_latest_engine_version(conn, module, engine_type): + try: + response = conn.describe_broker_engine_types(EngineType=engine_type) + return response["BrokerEngineTypes"][0]["EngineVersions"][0]["Name"] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list engine versions") + + +def get_broker_id(conn, module): + try: + broker_name = module.params["broker_name"] + broker_id = None + response = conn.list_brokers(MaxResults=100) + for broker in response["BrokerSummaries"]: + if broker["BrokerName"] == broker_name: + broker_id = broker["BrokerId"] + break + return broker_id + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list broker brokers.") + + +def get_broker_info(conn, module, broker_id): + try: + return conn.describe_broker(BrokerId=broker_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + 
module.fail_json_aws(e, msg="Couldn't get broker details.") + + +def wait_for_status(conn, module): + interval_secs = 5 + timeout = module.params.get("wait_timeout", 900) + broker_name = module.params.get("broker_name") + desired_state = module.params.get("state") + done = False + + paginator = conn.get_paginator("list_brokers") + page_iterator = paginator.paginate(PaginationConfig={"MaxItems": 100, "PageSize": 100, "StartingToken": ""}) + wait_timeout = time() + timeout + + while wait_timeout > time(): + try: + filtered_iterator = page_iterator.search(f"BrokerSummaries[?BrokerName == `{broker_name}`][]") + broker_list = list(filtered_iterator) + + if module.check_mode: + return + + if len(broker_list) < 1 and desired_state == "absent": + done = True + break + + if desired_state in ["present", "restarted"] and broker_list[0]["BrokerState"] == "RUNNING": + done = True + break + + if broker_list[0]["BrokerState"] == "CREATION_FAILED": + break + + sleep(interval_secs) + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't paginate brokers.") + + if not done: + module.fail_json(msg="desired state not reached") + + +def reboot_broker(conn, module, broker_id): + wait = module.params.get("wait") + + try: + response = conn.reboot_broker(BrokerId=broker_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't reboot broker.") + + if wait: + wait_for_status(conn, module) + + return response + + +def delete_broker(conn, module, broker_id): + wait = module.params.get("wait") + + try: + response = conn.delete_broker(BrokerId=broker_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete broker.") + + if wait: + wait_for_status(conn, module) + + return response + + +def create_broker(conn, module): + kwargs = _fill_kwargs(module) + wait = module.params.get("wait") + + if "EngineVersion" in kwargs and kwargs["EngineVersion"] == "latest": + kwargs["EngineVersion"] = get_latest_engine_version(conn, module, kwargs["EngineType"]) + if kwargs["AuthenticationStrategy"] == "LDAP": + module.fail_json(msg="'AuthenticationStrategy=LDAP' not supported, yet") + if "Users" not in kwargs: + # add a default admin user (a broker cannot be created without any users) + kwargs["Users"] = [{"Username": "admin", "Password": "adminPassword", "ConsoleAccess": True, "Groups": []}] + if "EncryptionOptions" in kwargs and "UseAwsOwnedKey" in kwargs["EncryptionOptions"]: + kwargs["EncryptionOptions"]["UseAwsOwnedKey"] = False + # + if "SecurityGroups" not in kwargs or len(kwargs["SecurityGroups"]) == 0: + module.fail_json(msg="At least one security group must be specified on broker creation") + # + changed = True + result = conn.create_broker(**kwargs) + # + if wait: + wait_for_status(conn, module) + + return {"broker": camel_dict_to_snake_dict(result, ignore_list=["Tags"]), "changed": changed} + + +def update_broker(conn, module, broker_id): + kwargs = _fill_kwargs(module, apply_defaults=False, ignore_create_params=True) + wait = module.params.get("wait") + # replace name with id + broker_name = kwargs["BrokerName"] + del kwargs["BrokerName"] + kwargs["BrokerId"] = broker_id + # get current state for comparison: + api_result = get_broker_info(conn, module, broker_id) + if api_result["BrokerState"] != "RUNNING": + module.fail_json( + msg=f"Cannot trigger update while broker ({broker_id}) is in state
{api_result['BrokerState']}", + ) + # engine version of 'latest' is taken as "keep current one" + # i.e. do not request upgrade on playbook rerun + if "EngineVersion" in kwargs and kwargs["EngineVersion"] == "latest": + kwargs["EngineVersion"] = api_result["EngineVersion"] + result = {"broker_id": broker_id, "broker_name": broker_name} + changed = False + if _needs_change(api_result, kwargs): + changed = True + if not module.check_mode: + api_result = conn.update_broker(**kwargs) + # + # + if wait: + wait_for_status(conn, module) + + return {"broker": result, "changed": changed} + + +def ensure_absent(conn, module): + result = {"broker_name": module.params["broker_name"], "broker_id": None} + if module.check_mode: + return {"broker": camel_dict_to_snake_dict(result, ignore_list=["Tags"]), "changed": True} + broker_id = get_broker_id(conn, module) + result["broker_id"] = broker_id + + if not broker_id: + # silently ignore delete of unknown broker (to make it idempotent) + return {"broker": result, "changed": False} + + try: + # check for pending delete (small race condition possible here) + api_result = get_broker_info(conn, module, broker_id) + if api_result["BrokerState"] == "DELETION_IN_PROGRESS": + return {"broker": result, "changed": False} + delete_broker(conn, module, broker_id) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + return {"broker": result, "changed": True} + + +def ensure_present(conn, module): + if module.check_mode: + return {"broker": {"broker_arn": "fakeArn", "broker_id": "fakeId"}, "changed": True} + + broker_id = get_broker_id(conn, module) + if broker_id: + return update_broker(conn, module, broker_id) + + return create_broker(conn, module) + + +def main(): + argument_spec = dict( + broker_name=dict(required=True, type="str"), + state=dict(default="present", choices=["present", "absent", "restarted"]), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=900, type="int"), + # parameters only allowed on create + deployment_mode=dict(choices=["SINGLE_INSTANCE", "ACTIVE_STANDBY_MULTI_AZ", "CLUSTER_MULTI_AZ"]), + use_aws_owned_key=dict(type="bool"), + kms_key_id=dict(type="str"), + engine_type=dict(choices=["ACTIVEMQ", "RABBITMQ"], type="str"), + maintenance_window_start_time=dict(type="dict"), + publicly_accessible=dict(type="bool"), + storage_type=dict(choices=["EBS", "EFS"]), + subnet_ids=dict(type="list", elements="str"), + users=dict(type="list", elements="dict"), + tags=dict(type="dict"), + # parameters allowed on update as well + authentication_strategy=dict(choices=["SIMPLE", "LDAP"]), + auto_minor_version_upgrade=dict(default=True, type="bool"), + engine_version=dict(type="str"), + host_instance_type=dict(type="str"), + enable_audit_log=dict(default=False, type="bool"), + enable_general_log=dict(default=False, type="bool"), + security_groups=dict(type="list", elements="str"), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client("mq") + + if module.params["state"] == "present": + try: + compound_result = ensure_present(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + # + module.exit_json(**compound_result) + + if module.params["state"] == "absent": + try: + compound_result = ensure_absent(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + # + module.exit_json(**compound_result) + + if module.params["state"] == "restarted": + broker_id = get_broker_id(connection,
module) + if module.check_mode: + module.exit_json(broker={"broker_id": broker_id if broker_id else "fakeId"}, changed=True) + if not broker_id: + module.fail_json( + msg=f"Cannot find broker with name {module.params['broker_name']}.", + ) + try: + changed = True + if not module.check_mode: + reboot_broker(connection, module, broker_id) + # + result = get_broker_info(connection, module, broker_id) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + module.exit_json(broker=result, changed=changed) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/aws/plugins/modules/mq_broker_config.py b/ansible_collections/community/aws/plugins/modules/mq_broker_config.py new file mode 100644 index 000000000..781bbb7d5 --- /dev/null +++ b/ansible_collections/community/aws/plugins/modules/mq_broker_config.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: mq_broker_config +version_added: 6.0.0 +short_description: Update Amazon MQ broker configuration +description: + - Update configuration for an MQ broker. + - If the new configuration differs from the current one, a new configuration + is created and the new version is assigned to the broker. + - Optionally allows broker reboot to make changes effective immediately. +author: + - FCO (@fotto) +options: + broker_id: + description: + - The ID of the MQ broker to work on. + type: str + required: true + config_xml: + description: + - The new broker configuration in XML format. + type: str + required: true + config_description: + description: + - Description to set on new configuration revision. + type: str + reboot: + description: + - Reboot broker after new config has been applied. + type: bool + default: false +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules +""" + +EXAMPLES = r""" +- name: send new XML config to broker relying on credentials from environment + community.aws.mq_broker_config: + broker_id: "aws-mq-broker-id" + config_xml: "{{ lookup('file', 'activemq.xml' )}}" + region: "{{ aws_region }}" + +- name: send new XML config to broker and reboot if necessary + community.aws.mq_broker_config: + broker_id: "aws-mq-broker-id" + config_xml: "{{ lookup('file', 'activemq2.xml' )}}" + reboot: true + +- name: send new broker config and set all credentials explicitly + community.aws.mq_broker_config: + broker_id: "{{ broker_id }}" + config_xml: "{{ lookup('file', 'activemq3.xml')}}" + config_description: "custom description for configuration object" + register: result +""" + +RETURN = r""" +broker: + description: API response of describe_broker() converted to snake case after changes have been applied. + type: dict + returned: success +configuration: + description: Details about new configuration object. + returned: I(changed=true) + type: complex + contains: + id: + description: Configuration ID of broker configuration. + type: str + example: c-386541b8-3139-42c2-9c2c-a4c267c1714f + revision: + description: Revision of the configuration that will be active after next reboot.
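The idempotency check in the code below decodes the currently assigned revision from base64 and compares it against the requested XML with whitespace runs collapsed (see is_same_config and its TODO). A standalone sketch of that round trip:

    # Whitespace-insensitive comparison as used by is_same_config; AWS
    # re-formats stored XML, so only a normalized comparison is attempted.
    import base64
    import re

    def normalized(xml_text):
        return re.sub(r"\s+", " ", xml_text, flags=re.S).rstrip()

    stored = base64.b64decode("PGJyb2tlci8+".encode()).decode()  # "<broker/>"
    print(normalized(stored) == normalized("<broker/>\n\n"))  # True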
+ type: int + example: 4 +""" + +import base64 +import re + +try: + import botocore +except ImportError: + # handled by AnsibleAWSModule + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + +DEFAULTS = {"reboot": False} +FULL_DEBUG = False + + +def is_same_config(old, new): + # we do a simple comparison here: strip down whitespace and compare the rest + # TODO: use same XML normalizer on new as used by AWS before comparing strings + old_stripped = re.sub(r"\s+", " ", old, flags=re.S).rstrip() + new_stripped = re.sub(r"\s+", " ", new, flags=re.S).rstrip() + return old_stripped == new_stripped + + +def get_broker_info(conn, module): + try: + return conn.describe_broker(BrokerId=module.params["broker_id"]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if module.check_mode: + return { + "broker_id": module.params["broker_id"], + } + module.fail_json_aws(e, msg="Couldn't get broker details.") + + +def get_current_configuration(conn, module, cfg_id, cfg_revision): + try: + return conn.describe_configuration_revision(ConfigurationId=cfg_id, ConfigurationRevision=str(cfg_revision)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get configuration revision.") + + +def create_and_assign_config(conn, module, broker_id, cfg_id, cfg_xml_encoded): + kwargs = {"ConfigurationId": cfg_id, "Data": cfg_xml_encoded} + if "config_description" in module.params and module.params["config_description"]: + kwargs["Description"] = module.params["config_description"] + else: + kwargs["Description"] = "Updated through community.aws.mq_broker_config ansible module" + # + try: + c_response = conn.update_configuration(**kwargs) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create new configuration revision.") + # + new_config_revision = c_response["LatestRevision"]["Revision"] + try: + b_response = conn.update_broker( + BrokerId=broker_id, Configuration={"Id": cfg_id, "Revision": new_config_revision} + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't assign new configuration revision to broker.") + # + return (c_response, b_response) + + +def reboot_broker(conn, module, broker_id): + try: + return conn.reboot_broker(BrokerId=broker_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't reboot broker.") + + +def ensure_config(conn, module): + broker_id = module.params["broker_id"] + broker_info = get_broker_info(conn, module) + changed = False + if module.check_mode and "Configurations" not in broker_info: + # no result from get_broker_info();
use the requested config + current_cfg_decoded = module.params["config_xml"] + else: + current_cfg = broker_info["Configurations"]["Current"] + if "Pending" in broker_info["Configurations"]: + current_cfg = broker_info["Configurations"]["Pending"] + current_cfg_encoded = get_current_configuration(conn, module, current_cfg["Id"], current_cfg["Revision"])[ + "Data" + ] + current_cfg_decoded = base64.b64decode(current_cfg_encoded.encode()).decode() + + if is_same_config(current_cfg_decoded, module.params["config_xml"]): + return {"changed": changed, "broker": camel_dict_to_snake_dict(broker_info, ignore_list=["Tags"])} + + (c_response, _b_response) = (None, None) + if not module.check_mode: + new_cfg_encoded = base64.b64encode(module.params["config_xml"].encode()).decode() + (c_response, _b_response) = create_and_assign_config( + conn, module, broker_id, current_cfg["Id"], new_cfg_encoded + ) + # + changed = True + + if changed and module.params["reboot"] and not module.check_mode: + reboot_broker(conn, module, broker_id) + # + broker_info = get_broker_info(conn, module) + return_struct = { + "changed": changed, + "broker": camel_dict_to_snake_dict(broker_info, ignore_list=["Tags"]), + # c_response is None in check mode: no new revision has been created yet + "configuration": None if c_response is None else {"id": c_response["Id"], "revision": c_response["LatestRevision"]["Revision"]}, + } + if FULL_DEBUG: + return_struct["old_config_xml"] = base64.b64decode(current_cfg_encoded) + return_struct["new_config_xml"] = module.params["config_xml"] + return_struct["old_config_revision"] = current_cfg["Revision"] + return return_struct + + +def main(): + argument_spec = dict( + broker_id=dict(required=True, type="str"), + config_xml=dict(required=True, type="str"), + config_description=dict(required=False, type="str"), + reboot=dict(required=False, type="bool", default=DEFAULTS["reboot"]), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client("mq") + + try: + result = ensure_config(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/aws/plugins/modules/mq_broker_info.py b/ansible_collections/community/aws/plugins/modules/mq_broker_info.py new file mode 100644 index 000000000..e760e0179 --- /dev/null +++ b/ansible_collections/community/aws/plugins/modules/mq_broker_info.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: mq_broker_info +version_added: 6.0.0 +short_description: Retrieve MQ Broker details +description: + - Get details about a broker. +author: + - FCO (@fotto) +options: + broker_id: + description: Get details for broker with specified ID. + type: str + broker_name: + description: + - Get details for broker with specified Name. + - Is ignored if I(broker_id) is specified.
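Like mq_broker, this module resolves a broker name by scanning list_brokers summaries (see get_broker_id below). The lookup, distilled into a standalone sketch (function name hypothetical; client is a boto3 "mq" client):

    # Name-to-id lookup shared by the mq modules; capped at MaxResults=100
    # just as in the module code.
    def resolve_broker_id(client, broker_name):
        for summary in client.list_brokers(MaxResults=100)["BrokerSummaries"]:
            if summary["BrokerName"] == broker_name:
                return summary["BrokerId"]
        return None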
+ type: str +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules +""" + + +EXAMPLES = r""" +- name: get current broker settings by id + community.aws.mq_broker_info: + broker_id: "aws-mq-broker-id" + register: broker_info + +- name: get current broker settings by name setting all credential parameters explicitly + community.aws.mq_broker_info: + broker_name: "aws-mq-broker-name" + register: broker_info +""" + +RETURN = r""" +broker: + description: API response of describe_broker() converted to snake case. + type: dict + returned: success +""" + +try: + import botocore +except ImportError: + # handled by AnsibleAWSModule + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + + +def get_broker_id(conn, module): + try: + broker_name = module.params["broker_name"] + broker_id = None + response = conn.list_brokers(MaxResults=100) + for broker in response["BrokerSummaries"]: + if broker["BrokerName"] == broker_name: + broker_id = broker["BrokerId"] + break + return broker_id + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list brokers.") + + +def get_broker_info(conn, module, broker_id): + try: + return conn.describe_broker(BrokerId=broker_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if module.check_mode: + module.exit_json(broker={"broker_id": broker_id, "broker_name": "fakeName"}) + else: + module.fail_json_aws(e, msg="Couldn't get broker details.") + + +def main(): + argument_spec = dict(broker_id=dict(type="str"), broker_name=dict(type="str")) + required_one_of = ( + ( + "broker_name", + "broker_id", + ), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_one_of=required_one_of, + supports_check_mode=True, + ) + broker_id = module.params["broker_id"] + broker_name = module.params["broker_name"] + + connection = module.client("mq") + + try: + if not broker_id: + broker_id = get_broker_id(connection, module) + if not broker_id: + if module.check_mode: + module.exit_json( + broker={"broker_id": "fakeId", "broker_name": broker_name if broker_name else "fakeName"} + ) + result = get_broker_info(connection, module, broker_id) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + # + module.exit_json(broker=camel_dict_to_snake_dict(result, ignore_list=["Tags"])) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/aws/plugins/modules/mq_user.py b/ansible_collections/community/aws/plugins/modules/mq_user.py new file mode 100644 index 000000000..68e1fd629 --- /dev/null +++ b/ansible_collections/community/aws/plugins/modules/mq_user.py @@ -0,0 +1,271 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: mq_user +version_added: 6.0.0 +short_description: Manage users in existing Amazon MQ broker +description: + - Manage Amazon MQ users. + - Pending changes are taken into account for idempotency. +author: + - FCO (@fotto) +options: + broker_id: + description: + - The ID of the MQ broker to work on. + type: str + required: true + username: + description: + - The name of the user to create/update/delete.
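_group_change_required in the hunk below treats group membership as order-insensitive and detects drift via a set intersection plus a length check. Its comparison, distilled (helper name hypothetical):

    # Order-insensitive group comparison as implemented by _group_change_required.
    def groups_differ(current, requested):
        if len(current) != len(requested):
            return True
        return len(set(current) & set(requested)) != len(current)

    print(groups_differ(["g1", "g2"], ["g2", "g1"]))  # False - same membership
    print(groups_differ(["g1", "g2"], ["g1", "g3"]))  # True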
+    type: str
+    required: true
+  state:
+    description:
+      - Create/update or delete the user.
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+  console_access:
+    description:
+      - Whether the user can access the MQ Console.
+      - Defaults to C(false) on creation.
+    type: bool
+  groups:
+    description:
+      - Set group memberships for user.
+      - Defaults to C([]) on creation.
+    type: list
+    elements: str
+  password:
+    description:
+      - Set password for user.
+      - Defaults to a random password on creation.
+      - Ignored unless I(allow_pw_update=true).
+    type: str
+  allow_pw_update:
+    description:
+      - When I(allow_pw_update=true) and I(password) is set, the password
+        will always be updated for the user.
+    default: false
+    type: bool
+extends_documentation_fragment:
+  - amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+"""
+
+EXAMPLES = r"""
+- name: create/update user - set provided password if user doesn't exist, yet
+  community.aws.mq_user:
+    state: present
+    broker_id: "aws-mq-broker-id"
+    username: "sample_user1"
+    console_access: false
+    groups: ["g1", "g2"]
+    password: "plain-text-password"
+
+- name: allow console access and update group list - relying on default state
+  community.aws.mq_user:
+    broker_id: "aws-mq-broker-id"
+    username: "sample_user1"
+    region: "{{ aws_region }}"
+    console_access: true
+    groups: ["g1", "g2", "g3"]
+
+- name: remove user - setting all credentials explicitly
+  community.aws.mq_user:
+    state: absent
+    broker_id: "aws-mq-broker-id"
+    username: "other_user"
+"""
+
+RETURN = r"""
+user:
+    description:
+      - just echoes the username
+      - "only present when state=present"
+    type: str
+    returned: success
+"""
+
+import secrets
+
+try:
+    import botocore
+except ImportError:
+    # handled by AnsibleAWSModule
+    pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+
+CREATE_DEFAULTS = {
+    "console_access": False,
+    "groups": [],
+}
+
+
+def _group_change_required(user_response, requested_groups):
+    current_groups = []
+    if "Groups" in user_response:
+        current_groups = user_response["Groups"]
+    elif "Pending" in user_response:
+        # to support automatic testing without broker reboot
+        current_groups = user_response["Pending"]["Groups"]
+    if len(current_groups) != len(requested_groups):
+        return True
+    if len(current_groups) != len(set(current_groups) & set(requested_groups)):
+        return True
+    #
+    return False
+
+
+def _console_access_change_required(user_response, requested_boolean):
+    current_boolean = CREATE_DEFAULTS["console_access"]
+    if "ConsoleAccess" in user_response:
+        current_boolean = user_response["ConsoleAccess"]
+    elif "Pending" in user_response:
+        # to support automatic testing without broker reboot
+        current_boolean = user_response["Pending"]["ConsoleAccess"]
+    #
+    return current_boolean != requested_boolean
+
+
+def generate_password():
+    return secrets.token_hex(20)
+
+
+# returns API response object
+def _create_user(conn, module):
+    kwargs = {"BrokerId": module.params["broker_id"], "Username": module.params["username"]}
+    if "groups" in module.params and module.params["groups"] is not None:
+        kwargs["Groups"] = module.params["groups"]
+    else:
+        kwargs["Groups"] = CREATE_DEFAULTS["groups"]
+    if "password" in module.params and module.params["password"]:
+        kwargs["Password"] = module.params["password"]
+    else:
+        kwargs["Password"] = generate_password()
+    if "console_access" in module.params and module.params["console_access"] is not None:
+        kwargs["ConsoleAccess"] = module.params["console_access"]
+    else:
+        kwargs["ConsoleAccess"] = CREATE_DEFAULTS["console_access"]
+    try:
+        response = conn.create_user(**kwargs)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't create user")
+    return response
+
+
+# returns API response object
+def _update_user(conn, module, kwargs):
+    try:
+        response = conn.update_user(**kwargs)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't update user")
+    return response
+
+
+def get_matching_user(conn, module, broker_id, username):
+    try:
+        response = conn.describe_user(BrokerId=broker_id, Username=username)
+    except is_boto3_error_code("NotFoundException"):
+        return None
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't get user details")
+    return response
+
+
+def ensure_user_present(conn, module):
+    user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"])
+    changed = False
+
+    if user is None:
+        if not module.check_mode:
+            _response = _create_user(conn, module)
+        changed = True
+    else:
+        kwargs = {}
+        if "groups" in module.params and module.params["groups"] is not None:
+            if _group_change_required(user, module.params["groups"]):
+                kwargs["Groups"] = module.params["groups"]
+        if "console_access" in module.params and module.params["console_access"] is not None:
+            if _console_access_change_required(user, module.params["console_access"]):
+                kwargs["ConsoleAccess"] = module.params["console_access"]
+        if "password" in module.params and module.params["password"]:
+            if "allow_pw_update" in module.params and module.params["allow_pw_update"]:
+                kwargs["Password"] = module.params["password"]
+        if len(kwargs) == 0:
+            changed = False
+        else:
+            if not module.check_mode:
+                kwargs["BrokerId"] = module.params["broker_id"]
+                kwargs["Username"] = module.params["username"]
+                response = _update_user(conn, module, kwargs)
+            #
+            changed = True
+    #
+    user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"])
+
+    return {"changed": changed, "user": camel_dict_to_snake_dict(user, ignore_list=["Tags"])}
+
+
+def ensure_user_absent(conn, module):
+    user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"])
+    result = {"changed": False}
+    if user is None:
+        return result
+    # better support for testing
+    if "Pending" in user and "PendingChange" in user["Pending"] and user["Pending"]["PendingChange"] == "DELETE":
+        return result
+
+    result = {"changed": True}
+    if module.check_mode:
+        return result
+
+    try:
+        conn.delete_user(BrokerId=user["BrokerId"], Username=user["Username"])
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't delete user")
+
+    return result
+
+
+def main():
+    argument_spec = dict(
+        broker_id=dict(required=True, type="str"),
+        username=dict(required=True, type="str"),
+        console_access=dict(required=False, type="bool"),
+        groups=dict(required=False, type="list", elements="str"),
+        password=dict(required=False, type="str", no_log=True),
+        allow_pw_update=dict(default=False, required=False, type="bool"),
+        state=dict(default="present", choices=["present", "absent"]),
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    connection = module.client("mq")
+
+    state = module.params.get("state")
+
+    try:
+        if state == "present":
+            result = ensure_user_present(connection, module)
+        elif state == "absent":
+            result = ensure_user_absent(connection, module)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json_aws(e)
+
+    module.exit_json(**result)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/aws/plugins/modules/mq_user_info.py b/ansible_collections/community/aws/plugins/modules/mq_user_info.py
new file mode 100644
index 000000000..64cf92da7
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/mq_user_info.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: mq_user_info
+version_added: 6.0.0
+short_description: List users of an Amazon MQ broker
+description:
+  - List users for the specified broker ID.
+  - Pending creations and deletions can be skipped by options.
+author:
+  - FCO (@fotto)
+options:
+  broker_id:
+    description:
+      - The ID of the MQ broker to work on.
+    type: str
+    required: true
+  max_results:
+    description:
+      - The maximum number of results to return.
+    type: int
+    default: 100
+  skip_pending_create:
+    description:
+      - Will skip pending creates from the result set.
+    type: bool
+    default: false
+  skip_pending_delete:
+    description:
+      - Will skip pending deletes from the result set.
+    type: bool
+    default: false
+  as_dict:
+    description:
+      - Convert result into lookup table by username.
+    type: bool
+    default: false
+
+extends_documentation_fragment:
+  - amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+"""
+
+
+EXAMPLES = r"""
+- name: get all users as list - relying on environment for API credentials
+  community.aws.mq_user_info:
+    broker_id: "aws-mq-broker-id"
+    max_results: 50
+  register: result
+
+- name: get users as dict - explicitly specifying all credentials
+  community.aws.mq_user_info:
+    broker_id: "aws-mq-broker-id"
+  register: result
+
+- name: get list of users to decide which may need to be deleted
+  community.aws.mq_user_info:
+    broker_id: "aws-mq-broker-id"
+    skip_pending_delete: true
+
+- name: get list of users to decide which may need to be created
+  community.aws.mq_user_info:
+    broker_id: "aws-mq-broker-id"
+    skip_pending_create: true
+"""
+
+RETURN = r"""
+users:
+    type: dict
+    returned: success
+    description:
+      - dict key is username
+      - each entry is the record for a user as returned by API but converted to snake case
+"""
+
+try:
+    import botocore
+except ImportError:
+    # handled by AnsibleAWSModule
+    pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+DEFAULTS = {"max_results": 100, "skip_pending_create": False, "skip_pending_delete": False}
+
+
+def get_user_info(conn, module):
+    try:
+        response = conn.list_users(BrokerId=module.params["broker_id"], MaxResults=module.params["max_results"])
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        if module.check_mode:
+            # return an empty result for an unknown broker in check mode
+            if module.params["as_dict"]:
+                return {}
+            return []
+        module.fail_json_aws(e, msg="Failed to describe users")
+    #
+    if not module.params["skip_pending_create"] and not module.params["skip_pending_delete"]:
+        # we can simply return the sub-object from the response
+        records = response["Users"]
+    else:
+        records = []
+        for record in response["Users"]:
+            if "PendingChange" in record:
+                if record["PendingChange"] == "CREATE" and module.params["skip_pending_create"]:
+                    continue
+                if record["PendingChange"] == "DELETE" and module.params["skip_pending_delete"]:
+                    continue
+            #
+            records.append(record)
+    #
+    if module.params["as_dict"]:
+        user_records = {}
+        for record in records:
+            user_records[record["Username"]] = record
+        #
+        return camel_dict_to_snake_dict(user_records, ignore_list=["Tags"])
+
+    return camel_dict_to_snake_dict(records, ignore_list=["Tags"])
+
+
+def main():
+    argument_spec = dict(
+        broker_id=dict(required=True, type="str"),
+        max_results=dict(required=False, type="int", default=DEFAULTS["max_results"]),
+        skip_pending_create=dict(required=False, type="bool", default=DEFAULTS["skip_pending_create"]),
+        skip_pending_delete=dict(required=False, type="bool", default=DEFAULTS["skip_pending_delete"]),
+        as_dict=dict(required=False, type="bool", default=False),
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    connection = module.client("mq")
+
+    try:
+        user_records = get_user_info(connection, module)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json_aws(e)
+
+    module.exit_json(users=user_records)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/aws/plugins/modules/msk_cluster.py b/ansible_collections/community/aws/plugins/modules/msk_cluster.py
index 75c7fa829..aa0383294 100644
--- a/ansible_collections/community/aws/plugins/modules/msk_cluster.py
+++ b/ansible_collections/community/aws/plugins/modules/msk_cluster.py
@@ -1,12 +1,9 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: (c) 2021, Daniil Kupchenko (@oukooveu)
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
 DOCUMENTATION = r"""
 ---
 module: msk_cluster
@@ -207,16 +204,16 @@ options:
         description: How many seconds to wait. Cluster creation can take up to 20-30 minutes.
         type: int
         default: 3600
-extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
-  - amazon.aws.boto3
-  - amazon.aws.tags
 notes:
    - All operations are time-consuming, for example create takes 20-30 minutes,
      update kafka version -- more than one hour, update configuration -- 10-15 minutes;
    - Cluster's brokers get evenly distributed over a number of availability zones
      that's equal to the number of subnets.
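Given the long-running operations described in the notes above, creation is usually paired with the wait options. A hedged sketch (every ID, ARN, and version below is a placeholder):

```yaml
- name: create an MSK cluster and block until it is ACTIVE
  community.aws.msk_cluster:
    name: "example-cluster"
    state: present
    version: "2.8.1"
    nodes: 2  # must be a multiple of the number of subnets
    configuration_arn: "arn:aws:kafka:us-east-1:123456789012:configuration/example/1"
    configuration_revision: 1
    subnets:
      - "subnet-0123456789abcdef0"
      - "subnet-0fedcba9876543210"
    wait: true
    wait_timeout: 3600
```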
+extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 + - amazon.aws.tags """ EXAMPLES = r""" @@ -274,12 +271,12 @@ try: except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - camel_dict_to_snake_dict, - compare_aws_tags, - AWSRetry, -) +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule @AWSRetry.jittered_backoff(retries=5, delay=5) @@ -304,7 +301,7 @@ def find_cluster_by_name(client, module, cluster_name): module.fail_json_aws(e, "Failed to find kafka cluster by name") if cluster_list: if len(cluster_list) != 1: - module.fail_json(msg="Found more than one cluster with name '{0}'".format(cluster_name)) + module.fail_json(msg=f"Found more than one cluster with name '{cluster_name}'") return cluster_list[0] return {} @@ -343,11 +340,7 @@ def wait_for_cluster_state(client, module, arn, state="ACTIVE"): if current_state == state: return if time.time() - start > timeout: - module.fail_json( - msg="Timeout waiting for cluster {0} (desired state is '{1}')".format( - current_state, state - ) - ) + module.fail_json(msg=f"Timeout waiting for cluster {current_state} (desired state is '{state}')") time.sleep(check_interval) @@ -367,7 +360,7 @@ def prepare_create_options(module): "BrokerNodeGroupInfo": { "ClientSubnets": module.params["subnets"], "InstanceType": module.params["instance_type"], - } + }, } if module.params["security_groups"] and len(module.params["security_groups"]) != 0: @@ -375,9 +368,7 @@ def prepare_create_options(module): if module.params["ebs_volume_size"]: c_params["BrokerNodeGroupInfo"]["StorageInfo"] = { - "EbsStorageInfo": { - "VolumeSize": module.params.get("ebs_volume_size") - } + "EbsStorageInfo": {"VolumeSize": module.params.get("ebs_volume_size")} } if module.params["encryption"]: @@ -388,7 +379,7 @@ def prepare_create_options(module): } c_params["EncryptionInfo"]["EncryptionInTransit"] = { "ClientBroker": module.params["encryption"]["in_transit"].get("client_broker", "TLS"), - "InCluster": module.params["encryption"]["in_transit"].get("in_cluster", True) + "InCluster": module.params["encryption"]["in_transit"].get("in_cluster", True), } if module.params["authentication"]: @@ -428,12 +419,8 @@ def prepare_open_monitoring_options(module): open_monitoring = module.params["open_monitoring"] or {} m_params["OpenMonitoring"] = { "Prometheus": { - "JmxExporter": { - "EnabledInBroker": open_monitoring.get("jmx_exporter", False) - }, - "NodeExporter": { - "EnabledInBroker": open_monitoring.get("node_exporter", False) - } + "JmxExporter": {"EnabledInBroker": open_monitoring.get("jmx_exporter", False)}, + "NodeExporter": {"EnabledInBroker": open_monitoring.get("node_exporter", False)}, } } return m_params @@ -445,36 +432,26 @@ def prepare_logging_options(module): if logging.get("cloudwatch"): l_params["CloudWatchLogs"] = { "Enabled": module.params["logging"]["cloudwatch"].get("enabled"), - "LogGroup": module.params["logging"]["cloudwatch"].get("log_group") + "LogGroup": module.params["logging"]["cloudwatch"].get("log_group"), } else: - 
l_params["CloudWatchLogs"] = { - "Enabled": False - } + l_params["CloudWatchLogs"] = {"Enabled": False} if logging.get("firehose"): l_params["Firehose"] = { "Enabled": module.params["logging"]["firehose"].get("enabled"), - "DeliveryStream": module.params["logging"]["firehose"].get("delivery_stream") + "DeliveryStream": module.params["logging"]["firehose"].get("delivery_stream"), } else: - l_params["Firehose"] = { - "Enabled": False - } + l_params["Firehose"] = {"Enabled": False} if logging.get("s3"): l_params["S3"] = { "Enabled": module.params["logging"]["s3"].get("enabled"), "Bucket": module.params["logging"]["s3"].get("bucket"), - "Prefix": module.params["logging"]["s3"].get("prefix") + "Prefix": module.params["logging"]["s3"].get("prefix"), } else: - l_params["S3"] = { - "Enabled": False - } - return { - "LoggingInfo": { - "BrokerLogs": l_params - } - } + l_params["S3"] = {"Enabled": False} + return {"LoggingInfo": {"BrokerLogs": l_params}} def create_or_update_cluster(client, module): @@ -488,7 +465,6 @@ def create_or_update_cluster(client, module): cluster = find_cluster_by_name(client, module, module.params["name"]) if not cluster: - changed = True if module.check_mode: @@ -508,7 +484,6 @@ def create_or_update_cluster(client, module): wait_for_cluster_state(client, module, arn=response["ClusterArn"], state="ACTIVE") else: - response["ClusterArn"] = cluster["ClusterArn"] response["changes"] = {} @@ -517,9 +492,7 @@ def create_or_update_cluster(client, module): "broker_count": { "current_value": cluster["NumberOfBrokerNodes"], "target_value": module.params.get("nodes"), - "update_params": { - "TargetNumberOfBrokerNodes": module.params.get("nodes") - } + "update_params": {"TargetNumberOfBrokerNodes": module.params.get("nodes")}, }, "broker_storage": { "current_value": cluster["BrokerNodeGroupInfo"]["StorageInfo"]["EbsStorageInfo"]["VolumeSize"], @@ -528,14 +501,12 @@ def create_or_update_cluster(client, module): "TargetBrokerEBSVolumeInfo": [ {"KafkaBrokerNodeId": "All", "VolumeSizeGB": module.params.get("ebs_volume_size")} ] - } + }, }, "broker_type": { "current_value": cluster["BrokerNodeGroupInfo"]["InstanceType"], "target_value": module.params.get("instance_type"), - "update_params": { - "TargetInstanceType": module.params.get("instance_type") - } + "update_params": {"TargetInstanceType": module.params.get("instance_type")}, }, "cluster_configuration": { "current_value": { @@ -549,51 +520,44 @@ def create_or_update_cluster(client, module): "update_params": { "ConfigurationInfo": { "Arn": module.params.get("configuration_arn"), - "Revision": module.params.get("configuration_revision") + "Revision": module.params.get("configuration_revision"), } - } + }, }, "cluster_kafka_version": { "current_value": cluster["CurrentBrokerSoftwareInfo"]["KafkaVersion"], "target_value": module.params.get("version"), - "update_params": { - "TargetKafkaVersion": module.params.get("version") - } + "update_params": {"TargetKafkaVersion": module.params.get("version")}, }, "enhanced_monitoring": { "current_value": cluster["EnhancedMonitoring"], "target_value": module.params.get("enhanced_monitoring"), "update_method": "update_monitoring", - "update_params": prepare_enhanced_monitoring_options(module) + "update_params": prepare_enhanced_monitoring_options(module), }, "open_monitoring": { - "current_value": { - "OpenMonitoring": cluster["OpenMonitoring"] - }, + "current_value": {"OpenMonitoring": cluster["OpenMonitoring"]}, "target_value": prepare_open_monitoring_options(module), "update_method": 
"update_monitoring", - "update_params": prepare_open_monitoring_options(module) + "update_params": prepare_open_monitoring_options(module), }, "logging": { - "current_value": { - "LoggingInfo": cluster["LoggingInfo"] - }, + "current_value": {"LoggingInfo": cluster["LoggingInfo"]}, "target_value": prepare_logging_options(module), "update_method": "update_monitoring", - "update_params": prepare_logging_options(module) - } + "update_params": prepare_logging_options(module), + }, } for method, options in msk_cluster_changes.items(): - - if 'botocore_version' in options: + if "botocore_version" in options: if not module.botocore_at_least(options["botocore_version"]): continue try: update_method = getattr(client, options.get("update_method", "update_" + method)) except AttributeError as e: - module.fail_json_aws(e, "There is no update method 'update_{0}'".format(method)) + module.fail_json_aws(e, f"There is no update method 'update_{method}'") if options["current_value"] != options["target_value"]: changed = True @@ -609,23 +573,17 @@ def create_or_update_cluster(client, module): wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE") else: module.fail_json( - msg="Cluster can be updated only in active state, current state is '{0}'. check cluster state or use wait option".format( - state - ) + msg=f"Cluster can be updated only in active state, current state is '{state}'. check cluster state or use wait option" ) try: response["changes"][method] = update_method( - ClusterArn=cluster["ClusterArn"], - CurrentVersion=version, - **options["update_params"] + ClusterArn=cluster["ClusterArn"], CurrentVersion=version, **options["update_params"] ) except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws( - e, "Failed to update cluster via 'update_{0}'".format(method) - ) + module.fail_json_aws(e, f"Failed to update cluster via 'update_{method}'") if module.params["wait"]: wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE") @@ -636,15 +594,15 @@ def create_or_update_cluster(client, module): def update_cluster_tags(client, module, arn): - new_tags = module.params.get('tags') + new_tags = module.params.get("tags") if new_tags is None: return False - purge_tags = module.params.get('purge_tags') + purge_tags = module.params.get("purge_tags") try: - existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)['Tags'] + existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)["Tags"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to retrieve tags for cluster '{0}'".format(arn)) + module.fail_json_aws(e, msg=f"Unable to retrieve tags for cluster '{arn}'") tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) @@ -655,14 +613,13 @@ def update_cluster_tags(client, module, arn): if tags_to_add: client.tag_resource(ResourceArn=arn, Tags=tags_to_add, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for cluster '{0}'".format(arn)) + module.fail_json_aws(e, msg=f"Unable to set tags for cluster '{arn}'") changed = bool(tags_to_add) or bool(tags_to_remove) return changed def delete_cluster(client, module): - cluster = find_cluster_by_name(client, module, module.params["name"]) if module.check_mode: @@ -691,7 +648,6 @@ def delete_cluster(client, module): def main(): - 
    module_args = dict(
        name=dict(type="str", required=True),
        state=dict(type="str", choices=["present", "absent"], default="present"),
@@ -720,10 +676,7 @@ def main():
                     type="dict",
                     options=dict(
                         in_cluster=dict(type="bool", default=True),
-                        client_broker=dict(
-                            choices=["TLS", "TLS_PLAINTEXT", "PLAINTEXT"],
-                            default="TLS"
-                        ),
+                        client_broker=dict(choices=["TLS", "TLS_PLAINTEXT", "PLAINTEXT"], default="TLS"),
                     ),
                 ),
             ),
         ),
@@ -783,30 +736,28 @@ def main():
         ),
         wait=dict(type="bool", default=False),
         wait_timeout=dict(type="int", default=3600),
-        tags=dict(type='dict', aliases=['resource_tags']),
-        purge_tags=dict(type='bool', default=True),
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(type="bool", default=True),
     )
 
     module = AnsibleAWSModule(
         argument_spec=module_args,
-        required_if=[['state', 'present', ['version', 'configuration_arn', 'configuration_revision', 'subnets']]],
-        supports_check_mode=True
+        required_if=[["state", "present", ["version", "configuration_arn", "configuration_revision", "subnets"]]],
+        supports_check_mode=True,
     )
 
     client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff())
 
     if module.params["state"] == "present":
         if len(module.params["subnets"]) < 2:
-            module.fail_json(
-                msg="At least two client subnets should be provided"
-            )
+            module.fail_json(msg="At least two client subnets should be provided")
         if int(module.params["nodes"]) % int(len(module.params["subnets"])) != 0:
             module.fail_json(
                 msg="The number of broker nodes must be a multiple of availability zones in the subnets parameter"
             )
         if len(module.params["name"]) > 64:
             module.fail_json(
-                module.fail_json(msg='Cluster name "{0}" exceeds 64 character limit'.format(module.params["name"]))
+                msg=f"Cluster name \"{module.params['name']}\" exceeds 64 character limit"
             )
         changed, response = create_or_update_cluster(client, module)
     elif module.params["state"] == "absent":
@@ -816,9 +767,7 @@ def main():
     bootstrap_broker_string = {}
     if response.get("ClusterArn") and module.params["state"] == "present":
         try:
-            cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"], aws_retry=True)[
-                "ClusterInfo"
-            ]
+            cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"], aws_retry=True)["ClusterInfo"]
             if cluster_info.get("State") == "ACTIVE":
                 brokers = client.get_bootstrap_brokers(ClusterArn=response["ClusterArn"], aws_retry=True)
                 if brokers.get("BootstrapBrokerString"):
@@ -831,9 +780,7 @@
         ) as e:
             module.fail_json_aws(
                 e,
-                "Can not obtain information about cluster {0}".format(
-                    response["ClusterArn"]
-                ),
+                f"Cannot obtain information about cluster {response['ClusterArn']}",
             )
 
     module.exit_json(
diff --git a/ansible_collections/community/aws/plugins/modules/msk_config.py b/ansible_collections/community/aws/plugins/modules/msk_config.py
index 812eba16d..2469f9598 100644
--- a/ansible_collections/community/aws/plugins/modules/msk_config.py
+++ b/ansible_collections/community/aws/plugins/modules/msk_config.py
@@ -1,12 +1,9 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: (c) 2021, Daniil Kupchenko (@oukooveu)
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
 DOCUMENTATION = r"""
 ---
 module: msk_config
@@ -44,8 +41,8 @@ options:
         type: list
         elements: str
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
 """
 
@@ -99,18 +96,19 @@ try:
 except ImportError:
     pass  # handled by AnsibleAWSModule
 
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
-    camel_dict_to_snake_dict,
-    AWSRetry,
-)
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 
 
 def dict_to_prop(d):
     """convert dictionary to multi-line properties"""
     if len(d) == 0:
         return ""
-    return "\n".join("{0}={1}".format(k, v) for k, v in d.items())
+    return "\n".join(f"{k}={v}" for k, v in d.items())
 
 
 def prop_to_dict(p):
@@ -146,19 +144,13 @@ def find_active_config(client, module):
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json_aws(e, msg="failed to obtain kafka configurations")
 
-    active_configs = list(
-        item
-        for item in all_configs
-        if item["Name"] == name and item["State"] == "ACTIVE"
-    )
+    active_configs = list(item for item in all_configs if item["Name"] == name and item["State"] == "ACTIVE")
 
     if active_configs:
         if len(active_configs) == 1:
             return active_configs[0]
         else:
-            module.fail_json_aws(
-                msg="found more than one active config with name '{0}'".format(name)
-            )
+            module.fail_json_aws(msg=f"found more than one active config with name '{name}'")
 
     return None
 
@@ -195,7 +187,6 @@ def create_config(client, module):
 
     # create new configuration
     if not config:
-
         if module.check_mode:
             return True, {}
 
@@ -205,7 +196,7 @@ def create_config(client, module):
                 Description=module.params.get("description"),
                 KafkaVersions=module.params.get("kafka_versions"),
                 ServerProperties=dict_to_prop(module.params.get("config")).encode(),
-                aws_retry=True
+                aws_retry=True,
             )
         except (
             botocore.exceptions.BotoCoreError,
@@ -216,7 +207,9 @@ def create_config(client, module):
     # update existing configuration (creates new revision)
     else:
         # it's required because 'config' doesn't contain 'ServerProperties'
-        response = get_configuration_revision(client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"])
+        response = get_configuration_revision(
+            client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"]
+        )
 
         if not is_configuration_changed(module, response):
             return False, response
@@ -229,7 +222,7 @@ def create_config(client, module):
                 Arn=config["Arn"],
                 Description=module.params.get("description"),
                 ServerProperties=dict_to_prop(module.params.get("config")).encode(),
-                aws_retry=True
+                aws_retry=True,
             )
         except (
             botocore.exceptions.BotoCoreError,
@@ -270,7 +263,6 @@ def delete_config(client, module):
 
 
 def main():
-
     module_args = dict(
         name=dict(type="str", required=True),
         description=dict(type="str", default=""),
@@ -292,7 +284,8 @@ def main():
     # return some useless stuff in check mode if the configuration doesn't exist
     # can be useful when these options are referenced by other modules during check mode run
     if module.check_mode and not response.get("Arn"):
-        arn = "arn:aws:kafka:region:account:configuration/name/id"
+        account_id, partition = get_aws_account_info(module)
+        arn = f"arn:{partition}:kafka:{module.region}:{account_id}:configuration/{module.params['name']}/id"
         revision = 1
         server_properties = ""
     else:
diff --git
a/ansible_collections/community/aws/plugins/modules/networkfirewall.py b/ansible_collections/community/aws/plugins/modules/networkfirewall.py index 9bb6ebb75..f7fe63f33 100644 --- a/ansible_collections/community/aws/plugins/modules/networkfirewall.py +++ b/ansible_collections/community/aws/plugins/modules/networkfirewall.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: networkfirewall short_description: manage AWS Network Firewall firewalls version_added: 4.0.0 @@ -104,34 +102,34 @@ options: author: - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create an AWS Network Firewall - community.aws.networkfirewall: name: 'ExampleFirewall' state: present policy: 'ExamplePolicy' subnets: - - 'subnet-123456789abcdef01' + - 'subnet-123456789abcdef01' # Create an AWS Network Firewall with various options, don't wait for creation # to finish. - community.aws.networkfirewall: name: 'ExampleFirewall' state: present - delete_protection: True + delete_protection: true description: "An example Description" policy: 'ExamplePolicy' - policy_change_protection: True + policy_change_protection: true subnets: - - 'subnet-123456789abcdef01' - - 'subnet-abcdef0123456789a' - subnet_change_protection: True + - 'subnet-123456789abcdef01' + - 'subnet-abcdef0123456789a' + subnet_change_protection: true tags: ExampleTag: Example Value another_tag: another_example @@ -142,9 +140,9 @@ EXAMPLES = ''' - community.aws.networkfirewall: state: absent name: 'ExampleFirewall' -''' +""" -RETURN = ''' +RETURN = r""" firewall: description: The full details of the firewall returned: success @@ -269,37 +267,35 @@ firewall: } } } -''' - +""" -from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager def main(): - argument_spec = dict( - name=dict(type='str', required=False, aliases=['firewall_name']), - arn=dict(type='str', required=False, aliases=['firewall_arn']), - state=dict(type='str', required=False, default='present', choices=['present', 'absent']), - description=dict(type='str', required=False), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), - subnet_change_protection=dict(type='bool', required=False), - policy_change_protection=dict(type='bool', required=False, aliases=['firewall_policy_change_protection']), - delete_protection=dict(type='bool', required=False), - subnets=dict(type='list', elements='str', required=False), - purge_subnets=dict(type='bool', required=False, default=True), - policy=dict(type='str', required=False, aliases=['firewall_policy_arn']), + name=dict(type="str", required=False, aliases=["firewall_name"]), + arn=dict(type="str", required=False, aliases=["firewall_arn"]), + 
state=dict(type="str", required=False, default="present", choices=["present", "absent"]), + description=dict(type="str", required=False), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), + subnet_change_protection=dict(type="bool", required=False), + policy_change_protection=dict(type="bool", required=False, aliases=["firewall_policy_change_protection"]), + delete_protection=dict(type="bool", required=False), + subnets=dict(type="list", elements="str", required=False), + purge_subnets=dict(type="bool", required=False, default=True), + policy=dict(type="str", required=False, aliases=["firewall_policy_arn"]), ) mutually_exclusive = [ - ('arn', 'name',) + ["arn", "name"], ] required_one_of = [ - ('arn', 'name',) + ["arn", "name"], ] module = AnsibleAWSModule( @@ -309,30 +305,30 @@ def main(): required_one_of=required_one_of, ) - arn = module.params.get('arn') - name = module.params.get('name') - state = module.params.get('state') + arn = module.params.get("arn") + name = module.params.get("name") + state = module.params.get("state") manager = NetworkFirewallManager(module, name=name, arn=arn) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - if state == 'absent': - manager.set_delete_protection(module.params.get('delete_protection', None)) + if state == "absent": + manager.set_delete_protection(module.params.get("delete_protection", None)) manager.delete() else: if not manager.original_resource: - if not module.params.get('subnets', None): - module.fail_json('The subnets parameter must be provided on creation.') - if not module.params.get('policy', None): - module.fail_json('The policy parameter must be provided on creation.') - manager.set_description(module.params.get('description', None)) - manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None)) - manager.set_subnet_change_protection(module.params.get('subnet_change_protection', None)) - manager.set_policy_change_protection(module.params.get('policy_change_protection', None)) - manager.set_delete_protection(module.params.get('delete_protection', None)) - manager.set_subnets(module.params.get('subnets', None), module.params.get('purge_subnets', None)) - manager.set_policy(module.params.get('policy', None)) + if not module.params.get("subnets", None): + module.fail_json("The subnets parameter must be provided on creation.") + if not module.params.get("policy", None): + module.fail_json("The policy parameter must be provided on creation.") + manager.set_description(module.params.get("description", None)) + manager.set_tags(module.params.get("tags", None), module.params.get("purge_tags", None)) + manager.set_subnet_change_protection(module.params.get("subnet_change_protection", None)) + manager.set_policy_change_protection(module.params.get("policy_change_protection", None)) + manager.set_delete_protection(module.params.get("delete_protection", None)) + manager.set_subnets(module.params.get("subnets", None), module.params.get("purge_subnets", None)) + manager.set_policy(module.params.get("policy", None)) manager.flush_changes() results = dict( @@ -344,9 +340,9 @@ def main(): before=manager.original_resource, after=manager.updated_resource, 
) - results['diff'] = diff + results["diff"] = diff module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_info.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_info.py index 85df6b026..262a31067 100644 --- a/ansible_collections/community/aws/plugins/modules/networkfirewall_info.py +++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: networkfirewall_info short_description: describe AWS Network Firewall firewalls version_added: 4.0.0 @@ -34,14 +32,15 @@ options: elements: str aliases: ['vpcs', 'vpc_id'] -author: Mark Chappell (@tremble) +author: + - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Describe all firewalls in an account - community.aws.networkfirewall_info: {} @@ -53,9 +52,9 @@ EXAMPLES = ''' # Describe a firewall by name - community.aws.networkfirewall_info: name: ExampleFirewall -''' +""" -RETURN = ''' +RETURN = r""" firewall_list: description: A list of ARNs of the matching firewalls. type: list @@ -184,32 +183,30 @@ firewalls: } } } -''' +""" - -from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager def main(): - argument_spec = dict( - name=dict(type='str', required=False), - arn=dict(type='str', required=False), - vpc_ids=dict(type='list', required=False, elements='str', aliases=['vpcs', 'vpc_id']), + name=dict(type="str", required=False), + arn=dict(type="str", required=False), + vpc_ids=dict(type="list", required=False, elements="str", aliases=["vpcs", "vpc_id"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('arn', 'name', 'vpc_ids',), + ["arn", "name", "vpc_ids"], ], ) - arn = module.params.get('arn') - name = module.params.get('name') - vpcs = module.params.get('vpc_ids') + arn = module.params.get("arn") + name = module.params.get("name") + vpcs = module.params.get("vpc_ids") manager = NetworkFirewallManager(module) @@ -218,20 +215,20 @@ def main(): if name or arn: firewall = manager.get_firewall(name=name, arn=arn) if firewall: - results['firewalls'] = [firewall] + results["firewalls"] = [firewall] else: - results['firewalls'] = [] + results["firewalls"] = [] else: if vpcs: firewall_list = manager.list(vpc_ids=vpcs) else: firewall_list = manager.list() - results['firewall_list'] = firewall_list + results["firewall_list"] = firewall_list firewalls = [manager.get_firewall(arn=f) for f in firewall_list] - results['firewalls'] = firewalls + results["firewalls"] = firewalls module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py 
b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py index 1026138a6..c742c9546 100644 --- a/ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py +++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: networkfirewall_policy short_description: manage AWS Network Firewall policies version_added: 4.0.0 @@ -78,7 +76,6 @@ options: C(aws:alert_strict) and C(aws:alert_established). - Only valid for policies where I(strict_rule_order=true). - When creating a new policy defaults to C(aws:drop_strict). - - I(stateful_default_actions) requires botocore>=1.21.52. required: false type: list elements: str @@ -88,7 +85,6 @@ options: - When I(strict_rule_order='strict') rules and rule groups are evaluated in the order that they're defined. - Cannot be updated after creation. - - I(stateful_rule_order) requires botocore>=1.21.52. required: false type: str choices: ['default', 'strict'] @@ -139,17 +135,16 @@ options: type: int required: false - author: - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 - amazon.aws.tags -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create an AWS Network Firewall Policy with default rule order - community.aws.networkfirewall_policy: stateful_rule_order: 'default' @@ -178,9 +173,9 @@ EXAMPLES = ''' - community.aws.networkfirewall_policy: state: absent name: 'ExampleDropPolicy' -''' +""" -RETURN = ''' +RETURN = r""" policy: description: The details of the policy type: dict @@ -336,48 +331,53 @@ policy: type: dict returned: success example: {'tagName': 'Some Value'} -''' - +""" -from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager def main(): - custom_action_options = dict( - name=dict(type='str', required=True), + name=dict(type="str", required=True), # Poorly documented, but "publishMetricAction.dimensions ... 
must have length less than or equal to 1" - publish_metric_dimension_value=dict(type='str', required=False, aliases=['publish_metric_dimension_values']), + publish_metric_dimension_value=dict(type="str", required=False, aliases=["publish_metric_dimension_values"]), # NetworkFirewallPolicyManager can cope with a list for future-proofing # publish_metric_dimension_values=dict(type='list', elements='str', required=False, aliases=['publish_metric_dimension_value']), ) argument_spec = dict( - name=dict(type='str', required=False), - arn=dict(type='str', required=False), - state=dict(type='str', required=False, default='present', choices=['present', 'absent']), - description=dict(type='str', required=False), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - stateful_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateful_groups']), - stateless_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateless_groups']), - stateful_default_actions=dict(type='list', elements='str', required=False), - stateless_default_actions=dict(type='list', elements='str', required=False), - stateless_fragment_default_actions=dict(type='list', elements='str', required=False), - stateful_rule_order=dict(type='str', required=False, choices=['strict', 'default'], aliases=['rule_order']), - stateless_custom_actions=dict(type='list', elements='dict', required=False, - options=custom_action_options, aliases=['custom_stateless_actions']), - purge_stateless_custom_actions=dict(type='bool', required=False, default=True, aliases=['purge_custom_stateless_actions']), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), + name=dict(type="str", required=False), + arn=dict(type="str", required=False), + state=dict(type="str", required=False, default="present", choices=["present", "absent"]), + description=dict(type="str", required=False), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + stateful_rule_groups=dict(type="list", elements="str", required=False, aliases=["stateful_groups"]), + stateless_rule_groups=dict(type="list", elements="str", required=False, aliases=["stateless_groups"]), + stateful_default_actions=dict(type="list", elements="str", required=False), + stateless_default_actions=dict(type="list", elements="str", required=False), + stateless_fragment_default_actions=dict(type="list", elements="str", required=False), + stateful_rule_order=dict(type="str", required=False, choices=["strict", "default"], aliases=["rule_order"]), + stateless_custom_actions=dict( + type="list", + elements="dict", + required=False, + options=custom_action_options, + aliases=["custom_stateless_actions"], + ), + purge_stateless_custom_actions=dict( + type="bool", required=False, default=True, aliases=["purge_custom_stateless_actions"] + ), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), ) mutually_exclusive = [ - ('arn', 'name',) + ["arn", "name"], ] required_one_of = [ - ('arn', 'name',) + ["arn", "name"], ] module = AnsibleAWSModule( @@ -387,36 +387,32 @@ def main(): required_one_of=required_one_of, ) - arn = module.params.get('arn') - name = module.params.get('name') - state = module.params.get('state') + arn = module.params.get("arn") + name = module.params.get("name") + state = module.params.get("state") manager = 
NetworkFirewallPolicyManager(module, name=name, arn=arn) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - rule_order = module.params.get('stateful_rule_order') - if rule_order and rule_order != "default": - module.require_botocore_at_least('1.21.52', reason='to set the rule order') - if module.params.get('stateful_default_actions'): - module.require_botocore_at_least( - '1.21.52', reason='to set the default actions for stateful flows') + rule_order = module.params.get("stateful_rule_order") - if state == 'absent': + if state == "absent": manager.delete() else: - manager.set_description(module.params.get('description', None)) - manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None)) + manager.set_description(module.params.get("description", None)) + manager.set_tags(module.params.get("tags", None), module.params.get("purge_tags", None)) # Actions need to be defined before potentially consuming them manager.set_custom_stateless_actions( - module.params.get('stateless_custom_actions', None), - module.params.get('purge_stateless_custom_actions', True)), - manager.set_stateful_rule_order(module.params.get('stateful_rule_order', None)) - manager.set_stateful_rule_groups(module.params.get('stateful_rule_groups', None)) - manager.set_stateless_rule_groups(module.params.get('stateless_rule_groups', None)) - manager.set_stateful_default_actions(module.params.get('stateful_default_actions', None)) - manager.set_stateless_default_actions(module.params.get('stateless_default_actions', None)) - manager.set_stateless_fragment_default_actions(module.params.get('stateless_fragment_default_actions', None)) + module.params.get("stateless_custom_actions", None), + module.params.get("purge_stateless_custom_actions", True), + ), + manager.set_stateful_rule_order(module.params.get("stateful_rule_order", None)) + manager.set_stateful_rule_groups(module.params.get("stateful_rule_groups", None)) + manager.set_stateless_rule_groups(module.params.get("stateless_rule_groups", None)) + manager.set_stateful_default_actions(module.params.get("stateful_default_actions", None)) + manager.set_stateless_default_actions(module.params.get("stateless_default_actions", None)) + manager.set_stateless_fragment_default_actions(module.params.get("stateless_fragment_default_actions", None)) manager.flush_changes() @@ -429,9 +425,9 @@ def main(): before=manager.original_resource, after=manager.updated_resource, ) - results['diff'] = diff + results["diff"] = diff module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py index 1f170f5b3..3bb921745 100644 --- a/ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py +++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: networkfirewall_policy_info short_description: describe AWS 
Network Firewall policies version_added: 4.0.0 @@ -26,14 +24,15 @@ options: required: false type: str -author: Mark Chappell (@tremble) +author: + - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Describe all Firewall policies in an account - community.aws.networkfirewall_policy_info: {} @@ -45,9 +44,9 @@ EXAMPLES = ''' # Describe a Firewall policy by name - community.aws.networkfirewall_policy_info: name: ExamplePolicy -''' +""" -RETURN = ''' +RETURN = r""" policy_list: description: A list of ARNs of the matching policies. type: list @@ -212,30 +211,28 @@ policies: type: dict returned: success example: {'tagName': 'Some Value'} -''' +""" - -from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager def main(): - argument_spec = dict( - name=dict(type='str', required=False), - arn=dict(type='str', required=False), + name=dict(type="str", required=False), + arn=dict(type="str", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('arn', 'name',), + ["arn", "name"], ], ) - arn = module.params.get('arn') - name = module.params.get('name') + arn = module.params.get("arn") + name = module.params.get("name") manager = NetworkFirewallPolicyManager(module) @@ -244,17 +241,17 @@ def main(): if name or arn: policy = manager.get_policy(name=name, arn=arn) if policy: - results['policies'] = [policy] + results["policies"] = [policy] else: - results['policies'] = [] + results["policies"] = [] else: policy_list = manager.list() - results['policy_list'] = policy_list + results["policy_list"] = policy_list policies = [manager.get_policy(arn=p) for p in policy_list] - results['policies'] = policies + results["policies"] = policies module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py index c8e2ea38b..9300036c5 100644 --- a/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py +++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: networkfirewall_rule_group short_description: create, delete and modify AWS Network Firewall rule groups version_added: 4.0.0 @@ -60,7 +58,6 @@ options: - Mutually exclusive with I(rule_type=stateless). - For more information on how rules are evaluated read the AWS documentation U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html). - - I(rule_order) requires botocore>=1.23.23. 
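A short sketch of the strict evaluation order described above; the group name, capacity, and Suricata rule string are illustrative assumptions, not values from this patch:

```yaml
- name: create a stateful rule group with strict rule order
  community.aws.networkfirewall_rule_group:
    name: "ExampleStrictGroup"
    type: "stateful"
    rule_order: "strict"
    capacity: 100
    rule_strings:
      - 'pass tcp any any -> any any (sid:1000001;)'
```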
type: str required: false choices: ['default', 'strict'] @@ -263,17 +260,16 @@ options: type: int required: false - author: - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create a rule group - name: Create a minimal AWS Network Firewall Rule Group community.aws.networkfirewall_rule_group: @@ -369,8 +365,8 @@ EXAMPLES = ''' domain_names: - 'example.com' - '.example.net' - filter_https: True - filter_http: True + filter_https: true + filter_http: true action: allow source_ips: '192.0.2.0/24' @@ -396,10 +392,9 @@ EXAMPLES = ''' name: 'MinimalGroup' type: 'stateful' state: absent +""" -''' - -RETURN = ''' +RETURN = r""" rule_group: description: Details of the rules in the rule group type: dict @@ -708,109 +703,104 @@ rule_group: type: str returned: success example: 'STATEFUL' -''' - +""" -from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager def main(): - domain_list_spec = dict( - domain_names=dict(type='list', elements='str', required=True), - filter_http=dict(type='bool', required=False, default=False), - filter_https=dict(type='bool', required=False, default=False), - action=dict(type='str', required=True, choices=['allow', 'deny']), - source_ips=dict(type='list', elements='str', required=False), + domain_names=dict(type="list", elements="str", required=True), + filter_http=dict(type="bool", required=False, default=False), + filter_https=dict(type="bool", required=False, default=False), + action=dict(type="str", required=True, choices=["allow", "deny"]), + source_ips=dict(type="list", elements="str", required=False), ) rule_list_spec = dict( - action=dict(type='str', required=True, choices=['pass', 'drop', 'alert']), - protocol=dict(type='str', required=True), - source=dict(type='str', required=True), - source_port=dict(type='str', required=True), - direction=dict(type='str', required=False, default='forward', choices=['forward', 'any']), - destination=dict(type='str', required=True), - destination_port=dict(type='str', required=True), - sid=dict(type='int', required=True), - rule_options=dict(type='dict', required=False), + action=dict(type="str", required=True, choices=["pass", "drop", "alert"]), + protocol=dict(type="str", required=True), + source=dict(type="str", required=True), + source_port=dict(type="str", required=True), + direction=dict(type="str", required=False, default="forward", choices=["forward", "any"]), + destination=dict(type="str", required=True), + destination_port=dict(type="str", required=True), + sid=dict(type="int", required=True), + rule_options=dict(type="dict", required=False), ) argument_spec = dict( - arn=dict(type='str', required=False), - name=dict(type='str', required=False), - rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateful']), + arn=dict(type="str", required=False), + name=dict(type="str", required=False), + rule_type=dict(type="str", required=False, aliases=["type"], choices=["stateful"]), # rule_type=dict(type='str', required=True, aliases=['type'], choices=['stateless', 'stateful']), - state=dict(type='str', required=False, choices=['present', 
'absent'], default='present'), - capacity=dict(type='int', required=False), - rule_order=dict(type='str', required=False, aliases=['stateful_rule_order'], choices=['default', 'strict']), - description=dict(type='str', required=False), - ip_variables=dict(type='dict', required=False, aliases=['ip_set_variables']), - purge_ip_variables=dict(type='bool', required=False, aliases=['purge_ip_set_variables'], default=True), - port_variables=dict(type='dict', required=False, aliases=['port_set_variables']), - purge_port_variables=dict(type='bool', required=False, aliases=['purge_port_set_variables'], default=True), - rule_strings=dict(type='list', elements='str', required=False), - domain_list=dict(type='dict', options=domain_list_spec, required=False), - rule_list=dict(type='list', elements='dict', aliases=['stateful_rule_list'], options=rule_list_spec, required=False), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), + state=dict(type="str", required=False, choices=["present", "absent"], default="present"), + capacity=dict(type="int", required=False), + rule_order=dict(type="str", required=False, aliases=["stateful_rule_order"], choices=["default", "strict"]), + description=dict(type="str", required=False), + ip_variables=dict(type="dict", required=False, aliases=["ip_set_variables"]), + purge_ip_variables=dict(type="bool", required=False, aliases=["purge_ip_set_variables"], default=True), + port_variables=dict(type="dict", required=False, aliases=["port_set_variables"]), + purge_port_variables=dict(type="bool", required=False, aliases=["purge_port_set_variables"], default=True), + rule_strings=dict(type="list", elements="str", required=False), + domain_list=dict(type="dict", options=domain_list_spec, required=False), + rule_list=dict( + type="list", elements="dict", aliases=["stateful_rule_list"], options=rule_list_spec, required=False + ), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('name', 'arn'), - ('rule_strings', 'domain_list', 'rule_list'), - ('domain_list', 'ip_variables'), + ["name", "arn"], + ["rule_strings", "domain_list", "rule_list"], + ["domain_list", "ip_variables"], ], required_together=[ - ('name', 'rule_type'), + ["name", "rule_type"], ], required_one_of=[ - ('name', 'arn'), + ["name", "arn"], ], ) - module.require_botocore_at_least('1.19.20') - - state = module.params.get('state') - name = module.params.get('name') - arn = module.params.get('arn') - rule_type = module.params.get('rule_type') - - if rule_type == 'stateless': - if module.params.get('rule_order'): - module.fail_json('rule_order can not be set for stateless rule groups') - if module.params.get('rule_strings'): - module.fail_json('rule_strings can only be used for stateful rule groups') - if module.params.get('rule_list'): - module.fail_json('rule_list can only be used for stateful rule groups') - if module.params.get('domain_list'): - module.fail_json('domain_list can only be used for stateful rule groups') - - if module.params.get('rule_order'): - module.require_botocore_at_least('1.23.23', reason='to set the rule order') + state = 
module.params.get("state") + name = module.params.get("name") + arn = module.params.get("arn") + rule_type = module.params.get("rule_type") + + if rule_type == "stateless": + if module.params.get("rule_order"): + module.fail_json("rule_order can not be set for stateless rule groups") + if module.params.get("rule_strings"): + module.fail_json("rule_strings can only be used for stateful rule groups") + if module.params.get("rule_list"): + module.fail_json("rule_list can only be used for stateful rule groups") + if module.params.get("domain_list"): + module.fail_json("domain_list can only be used for stateful rule groups") manager = NetworkFirewallRuleManager(module, arn=arn, name=name, rule_type=rule_type) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - if state == 'absent': + if state == "absent": manager.delete() else: - manager.set_description(module.params.get('description')) - manager.set_capacity(module.params.get('capacity')) - manager.set_rule_order(module.params.get('rule_order')) - manager.set_ip_variables(module.params.get('ip_variables'), module.params.get('purge_ip_variables')) - manager.set_port_variables(module.params.get('port_variables'), module.params.get('purge_port_variables')) - manager.set_rule_string(module.params.get('rule_strings')) - manager.set_domain_list(module.params.get('domain_list')) - manager.set_rule_list(module.params.get('rule_list')) - manager.set_tags(module.params.get('tags'), module.params.get('purge_tags')) + manager.set_description(module.params.get("description")) + manager.set_capacity(module.params.get("capacity")) + manager.set_rule_order(module.params.get("rule_order")) + manager.set_ip_variables(module.params.get("ip_variables"), module.params.get("purge_ip_variables")) + manager.set_port_variables(module.params.get("port_variables"), module.params.get("purge_port_variables")) + manager.set_rule_string(module.params.get("rule_strings")) + manager.set_domain_list(module.params.get("domain_list")) + manager.set_rule_list(module.params.get("rule_list")) + manager.set_tags(module.params.get("tags"), module.params.get("purge_tags")) manager.flush_changes() @@ -823,9 +813,9 @@ def main(): before=manager.original_resource, after=manager.updated_resource, ) - results['diff'] = diff + results["diff"] = diff module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py index a9cec3778..8b3c9d230 100644 --- a/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py +++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: networkfirewall_rule_group_info short_description: describe AWS Network Firewall rule groups version_added: 4.0.0 @@ -38,19 +36,19 @@ options: - When I(scope='account') returns a description of all rule groups in the account. 
- When I(scope='managed') returns a list of available managed rule group arns. - By default searches only at the account scope. - - I(scope='managed') requires botocore>=1.23.23. required: false choices: ['managed', 'account'] type: str -author: Mark Chappell (@tremble) +author: + - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Describe all Rule Groups in an account (excludes managed groups) - community.aws.networkfirewall_rule_group_info: {} @@ -68,10 +66,9 @@ EXAMPLES = ''' - community.aws.networkfirewall_rule_group_info: name: ExampleRuleGroup type: stateful +""" -''' - -RETURN = ''' +RETURN = r""" rule_list: description: A list of ARNs of the matching rule groups. type: list @@ -387,43 +384,36 @@ rule_groups: type: str returned: success example: 'STATEFUL' -''' - +""" -from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager def main(): - argument_spec = dict( - name=dict(type='str', required=False), - rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateless', 'stateful']), - arn=dict(type='str', required=False), - scope=dict(type='str', required=False, choices=['managed', 'account']), + name=dict(type="str", required=False), + rule_type=dict(type="str", required=False, aliases=["type"], choices=["stateless", "stateful"]), + arn=dict(type="str", required=False), + scope=dict(type="str", required=False, choices=["managed", "account"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('arn', 'name',), - ('arn', 'rule_type'), + ["arn", "name"], + ["arn", "rule_type"], ], required_together=[ - ('name', 'rule_type'), - ] + ["name", "rule_type"], + ], ) - module.require_botocore_at_least('1.19.20') - - arn = module.params.get('arn') - name = module.params.get('name') - rule_type = module.params.get('rule_type') - scope = module.params.get('scope') - - if module.params.get('scope') == 'managed': - module.require_botocore_at_least('1.23.23', reason='to list managed rules') + arn = module.params.get("arn") + name = module.params.get("name") + rule_type = module.params.get("rule_type") + scope = module.params.get("scope") manager = NetworkFirewallRuleManager(module, name=name, rule_type=rule_type) @@ -432,18 +422,18 @@ def main(): if name or arn: rule = manager.get_rule_group(name=name, rule_type=rule_type, arn=arn) if rule: - results['rule_groups'] = [rule] + results["rule_groups"] = [rule] else: - results['rule_groups'] = [] + results["rule_groups"] = [] else: rule_list = manager.list(scope=scope) - results['rule_list'] = rule_list - if scope != 'managed': + results["rule_list"] = rule_list + if scope != "managed": rules = [manager.get_rule_group(arn=r) for r in rule_list] - results['rule_groups'] = rules + results["rule_groups"] = rules module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/opensearch.py b/ansible_collections/community/aws/plugins/modules/opensearch.py index 7ed8c0722..d89e173bb 100644 --- a/ansible_collections/community/aws/plugins/modules/opensearch.py +++ 
b/ansible_collections/community/aws/plugins/modules/opensearch.py @@ -1,20 +1,18 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = """ +DOCUMENTATION = r""" --- module: opensearch short_description: Creates OpenSearch or ElasticSearch domain description: - Creates or modify a Amazon OpenSearch Service domain. version_added: 4.0.0 -author: "Sebastien Rosset (@sebastien-rosset)" +author: + - "Sebastien Rosset (@sebastien-rosset)" options: state: description: @@ -387,16 +385,16 @@ options: - how long before wait gives up, in seconds. default: 300 type: int -requirements: - - botocore >= 1.21.38 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags + - amazon.aws.boto3 """ -EXAMPLES = """ +RETURN = r""" # """ + +EXAMPLES = r""" - name: Create OpenSearch domain for dev environment, no zone awareness, no dedicated masters community.aws.opensearch: @@ -452,16 +450,16 @@ EXAMPLES = """ auto_tune_options: enabled: true maintenance_schedules: - - start_at: "2025-01-12" - duration: - value: 1 - unit: "HOURS" - cron_expression_for_recurrence: "cron(0 12 * * ? *)" - - start_at: "2032-01-12" - duration: - value: 2 - unit: "HOURS" - cron_expression_for_recurrence: "cron(0 12 * * ? *)" + - start_at: "2025-01-12" + duration: + value: 1 + unit: "HOURS" + cron_expression_for_recurrence: "cron(0 12 * * ? *)" + - start_at: "2032-01-12" + duration: + value: 2 + unit: "HOURS" + cron_expression_for_recurrence: "cron(0 12 * * ? *)" tags: Environment: Development Application: Search @@ -480,12 +478,11 @@ EXAMPLES = """ cluster_config: instance_count: 40 wait: true - """ -from copy import deepcopy import datetime import json +from copy import deepcopy try: import botocore @@ -494,26 +491,20 @@ except ImportError: from ansible.module_utils.six import string_types -# import module snippets -from ansible_collections.amazon.aws.plugins.module_utils.core import ( - AnsibleAWSModule, - is_boto3_error_code, -) -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - AWSRetry, - boto3_tag_list_to_ansible_dict, - compare_policies, -) -from ansible_collections.community.aws.plugins.module_utils.opensearch import ( - compare_domain_versions, - ensure_tags, - get_domain_status, - get_domain_config, - get_target_increment_version, - normalize_opensearch, - parse_version, - wait_for_domain_status, -) +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.opensearch import compare_domain_versions +from ansible_collections.community.aws.plugins.module_utils.opensearch import ensure_tags +from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_config +from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_status +from 
ansible_collections.community.aws.plugins.module_utils.opensearch import get_target_increment_version +from ansible_collections.community.aws.plugins.module_utils.opensearch import normalize_opensearch +from ansible_collections.community.aws.plugins.module_utils.opensearch import parse_version +from ansible_collections.community.aws.plugins.module_utils.opensearch import wait_for_domain_status def ensure_domain_absent(client, module): @@ -522,16 +513,17 @@ def ensure_domain_absent(client, module): domain = get_domain_status(client, module, domain_name) if module.check_mode: - module.exit_json( - changed=True, msg="Would have deleted domain if not in check mode" - ) + module.exit_json(changed=True, msg="Would have deleted domain if not in check mode") try: client.delete_domain(DomainName=domain_name) changed = True except is_boto3_error_code("ResourceNotFoundException"): # The resource does not exist, or it has already been deleted return dict(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="trying to delete domain") # If we're not waiting for a delete to complete then we're all done @@ -543,7 +535,10 @@ def ensure_domain_absent(client, module): return dict(changed=changed) except is_boto3_error_code("ResourceNotFoundException"): return dict(changed=changed) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, "awaiting domain deletion") @@ -568,8 +563,9 @@ def upgrade_domain(client, module, source_version, target_engine_version): # It's not possible to upgrade directly to the target version. # Check the module parameters to determine if this is allowed or not. if not module.params.get("allow_intermediate_upgrades"): - module.fail_json(msg="Cannot upgrade from {0} to version {1}. The highest compatible version is {2}".format( - source_version, target_engine_version, next_version)) + module.fail_json( + msg=f"Cannot upgrade from {source_version} to version {target_engine_version}. The highest compatible version is {next_version}" + ) parameters = { "DomainName": domain_name, @@ -592,17 +588,13 @@ def upgrade_domain(client, module, source_version, target_engine_version): # raised if it's not possible to upgrade to the target version. 
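# --- Condensed sketch of the loop above (not part of the patch) ---
# upgrade_domain() walks the chain of compatible engine versions one hop
# at a time rather than jumping straight to the target. Assuming the real
# get_target_increment_version() helper from
# community.aws.plugins.module_utils.opensearch (signature inferred from
# its call site above), the core idea reduces to:
from ansible_collections.community.aws.plugins.module_utils.opensearch import get_target_increment_version


def _upgrade_in_hops(client, module, domain_name, target_version, current_version):
    # Error handling, waiters and check mode are deliberately omitted here.
    while current_version != target_version:
        # Highest engine version reachable from current_version in one upgrade.
        next_version = get_target_increment_version(client, module, domain_name, target_version)
        client.upgrade_domain(DomainName=domain_name, TargetVersion=next_version)
        current_version = next_version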
module.fail_json_aws( e, - msg="Couldn't upgrade domain {0} from {1} to {2}".format( - domain_name, current_version, next_version - ), + msg=f"Couldn't upgrade domain {domain_name} from {current_version} to {next_version}", ) if module.check_mode: module.exit_json( changed=True, - msg="Would have upgraded domain from {0} to {1} if not in check mode".format( - current_version, next_version - ), + msg=f"Would have upgraded domain from {current_version} to {next_version} if not in check mode", ) current_version = next_version @@ -610,9 +602,7 @@ def upgrade_domain(client, module, source_version, target_engine_version): wait_for_domain_status(client, module, domain_name, "domain_available") -def set_cluster_config( - module, current_domain_config, desired_domain_config, change_set -): +def set_cluster_config(module, current_domain_config, desired_domain_config, change_set): changed = False cluster_config = desired_domain_config["ClusterConfig"] @@ -627,24 +617,16 @@ def set_cluster_config( if cluster_config["ZoneAwarenessEnabled"]: if cluster_opts.get("availability_zone_count") is not None: cluster_config["ZoneAwarenessConfig"] = { - "AvailabilityZoneCount": cluster_opts.get( - "availability_zone_count" - ), + "AvailabilityZoneCount": cluster_opts.get("availability_zone_count"), } if cluster_opts.get("dedicated_master") is not None: - cluster_config["DedicatedMasterEnabled"] = cluster_opts.get( - "dedicated_master" - ) + cluster_config["DedicatedMasterEnabled"] = cluster_opts.get("dedicated_master") if cluster_config["DedicatedMasterEnabled"]: if cluster_opts.get("dedicated_master_instance_type") is not None: - cluster_config["DedicatedMasterType"] = cluster_opts.get( - "dedicated_master_instance_type" - ) + cluster_config["DedicatedMasterType"] = cluster_opts.get("dedicated_master_instance_type") if cluster_opts.get("dedicated_master_instance_count") is not None: - cluster_config["DedicatedMasterCount"] = cluster_opts.get( - "dedicated_master_instance_count" - ) + cluster_config["DedicatedMasterCount"] = cluster_opts.get("dedicated_master_instance_count") if cluster_opts.get("warm_enabled") is not None: cluster_config["WarmEnabled"] = cluster_opts.get("warm_enabled") @@ -665,32 +647,19 @@ def set_cluster_config( if cold_storage_opts is not None and cold_storage_opts.get("enabled"): module.fail_json(msg="Cold Storage is not supported") cluster_config.pop("ColdStorageOptions", None) - if ( - current_domain_config is not None - and "ClusterConfig" in current_domain_config - ): + if current_domain_config is not None and "ClusterConfig" in current_domain_config: # Remove 'ColdStorageOptions' from the current domain config, otherwise the actual vs desired diff # will indicate a change must be done. current_domain_config["ClusterConfig"].pop("ColdStorageOptions", None) else: # Elasticsearch 7.9 and above support ColdStorageOptions. 
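# --- Hypothetical distillation (not part of the patch) ---
# Every set_*_options() helper in this file follows the same shape: merge
# the caller-supplied module options into the AWS-style desired config,
# then diff that section against the current config and record a
# human-readable reason in change_set for check-mode reporting. Roughly:
def _section_changed(current_config, desired_config, section, change_set):
    """Return True when the named section differs, appending why to change_set."""
    if current_config is None:
        # New domain: the create path reports the change, nothing to diff.
        return False
    if current_config.get(section) != desired_config.get(section):
        change_set.append(
            f"{section} changed from {current_config.get(section)} to {desired_config.get(section)}"
        )
        return True
    return False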
- if ( - cold_storage_opts is not None - and cold_storage_opts.get("enabled") is not None - ): + if cold_storage_opts is not None and cold_storage_opts.get("enabled") is not None: cluster_config["ColdStorageOptions"] = { "Enabled": cold_storage_opts.get("enabled"), } - if ( - current_domain_config is not None - and current_domain_config["ClusterConfig"] != cluster_config - ): - change_set.append( - "ClusterConfig changed from {0} to {1}".format( - current_domain_config["ClusterConfig"], cluster_config - ) - ) + if current_domain_config is not None and current_domain_config["ClusterConfig"] != cluster_config: + change_set.append(f"ClusterConfig changed from {current_domain_config['ClusterConfig']} to {cluster_config}") changed = True return changed @@ -716,22 +685,13 @@ def set_ebs_options(module, current_domain_config, desired_domain_config, change if ebs_opts.get("iops") is not None: ebs_config["Iops"] = ebs_opts.get("iops") - if ( - current_domain_config is not None - and current_domain_config["EBSOptions"] != ebs_config - ): - change_set.append( - "EBSOptions changed from {0} to {1}".format( - current_domain_config["EBSOptions"], ebs_config - ) - ) + if current_domain_config is not None and current_domain_config["EBSOptions"] != ebs_config: + change_set.append(f"EBSOptions changed from {current_domain_config['EBSOptions']} to {ebs_config}") changed = True return changed -def set_encryption_at_rest_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_encryption_at_rest_options(module, current_domain_config, desired_domain_config, change_set): changed = False encryption_at_rest_config = desired_domain_config["EncryptionAtRestOptions"] encryption_at_rest_opts = module.params.get("encryption_at_rest_options") @@ -745,50 +705,36 @@ def set_encryption_at_rest_options( } else: if encryption_at_rest_opts.get("kms_key_id") is not None: - encryption_at_rest_config["KmsKeyId"] = encryption_at_rest_opts.get( - "kms_key_id" - ) + encryption_at_rest_config["KmsKeyId"] = encryption_at_rest_opts.get("kms_key_id") if ( current_domain_config is not None - and current_domain_config["EncryptionAtRestOptions"] - != encryption_at_rest_config + and current_domain_config["EncryptionAtRestOptions"] != encryption_at_rest_config ): change_set.append( - "EncryptionAtRestOptions changed from {0} to {1}".format( - current_domain_config["EncryptionAtRestOptions"], - encryption_at_rest_config, - ) + f"EncryptionAtRestOptions changed from {current_domain_config['EncryptionAtRestOptions']} to" + f" {encryption_at_rest_config}" ) changed = True return changed -def set_node_to_node_encryption_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_node_to_node_encryption_options(module, current_domain_config, desired_domain_config, change_set): changed = False - node_to_node_encryption_config = desired_domain_config[ - "NodeToNodeEncryptionOptions" - ] + node_to_node_encryption_config = desired_domain_config["NodeToNodeEncryptionOptions"] node_to_node_encryption_opts = module.params.get("node_to_node_encryption_options") if node_to_node_encryption_opts is None: return changed if node_to_node_encryption_opts.get("enabled") is not None: - node_to_node_encryption_config["Enabled"] = node_to_node_encryption_opts.get( - "enabled" - ) + node_to_node_encryption_config["Enabled"] = node_to_node_encryption_opts.get("enabled") if ( current_domain_config is not None - and current_domain_config["NodeToNodeEncryptionOptions"] - != node_to_node_encryption_config + and 
current_domain_config["NodeToNodeEncryptionOptions"] != node_to_node_encryption_config ): change_set.append( - "NodeToNodeEncryptionOptions changed from {0} to {1}".format( - current_domain_config["NodeToNodeEncryptionOptions"], - node_to_node_encryption_config, - ) + f"NodeToNodeEncryptionOptions changed from {current_domain_config['NodeToNodeEncryptionOptions']} to" + f" {node_to_node_encryption_config}" ) changed = True return changed @@ -846,53 +792,36 @@ def set_vpc_options(module, current_domain_config, desired_domain_config, change pass else: # Note the subnets may be the same but be listed in a different order. - if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set( - vpc_config["SubnetIds"] - ): + if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set(vpc_config["SubnetIds"]): change_set.append( - "SubnetIds changed from {0} to {1}".format( - current_domain_config["VPCOptions"]["SubnetIds"], - vpc_config["SubnetIds"], - ) + f"SubnetIds changed from {current_domain_config['VPCOptions']['SubnetIds']} to" + f" {vpc_config['SubnetIds']}" ) changed = True - if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set( - vpc_config["SecurityGroupIds"] - ): + if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set(vpc_config["SecurityGroupIds"]): change_set.append( - "SecurityGroup changed from {0} to {1}".format( - current_domain_config["VPCOptions"]["SecurityGroupIds"], - vpc_config["SecurityGroupIds"], - ) + f"SecurityGroup changed from {current_domain_config['VPCOptions']['SecurityGroupIds']} to" + f" {vpc_config['SecurityGroupIds']}" ) changed = True return changed -def set_snapshot_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_snapshot_options(module, current_domain_config, desired_domain_config, change_set): changed = False snapshot_config = desired_domain_config["SnapshotOptions"] snapshot_opts = module.params.get("snapshot_options") if snapshot_opts is None: return changed if snapshot_opts.get("automated_snapshot_start_hour") is not None: - snapshot_config["AutomatedSnapshotStartHour"] = snapshot_opts.get( - "automated_snapshot_start_hour" - ) - if ( - current_domain_config is not None - and current_domain_config["SnapshotOptions"] != snapshot_config - ): + snapshot_config["AutomatedSnapshotStartHour"] = snapshot_opts.get("automated_snapshot_start_hour") + if current_domain_config is not None and current_domain_config["SnapshotOptions"] != snapshot_config: change_set.append("SnapshotOptions changed") changed = True return changed -def set_cognito_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_cognito_options(module, current_domain_config, desired_domain_config, change_set): changed = False cognito_config = desired_domain_config["CognitoOptions"] cognito_opts = module.params.get("cognito_options") @@ -908,28 +837,17 @@ def set_cognito_options( if cognito_opts.get("cognito_user_pool_id") is not None: cognito_config["UserPoolId"] = cognito_opts.get("cognito_user_pool_id") if cognito_opts.get("cognito_identity_pool_id") is not None: - cognito_config["IdentityPoolId"] = cognito_opts.get( - "cognito_identity_pool_id" - ) + cognito_config["IdentityPoolId"] = cognito_opts.get("cognito_identity_pool_id") if cognito_opts.get("cognito_role_arn") is not None: cognito_config["RoleArn"] = cognito_opts.get("cognito_role_arn") - if ( - current_domain_config is not None - and current_domain_config["CognitoOptions"] != cognito_config - ): - change_set.append( - 
"CognitoOptions changed from {0} to {1}".format( - current_domain_config["CognitoOptions"], cognito_config - ) - ) + if current_domain_config is not None and current_domain_config["CognitoOptions"] != cognito_config: + change_set.append(f"CognitoOptions changed from {current_domain_config['CognitoOptions']} to {cognito_config}") changed = True return changed -def set_advanced_security_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_advanced_security_options(module, current_domain_config, desired_domain_config, change_set): changed = False advanced_security_config = desired_domain_config["AdvancedSecurityOptions"] advanced_security_opts = module.params.get("advanced_security_options") @@ -943,121 +861,87 @@ def set_advanced_security_options( } else: if advanced_security_opts.get("internal_user_database_enabled") is not None: - advanced_security_config[ - "InternalUserDatabaseEnabled" - ] = advanced_security_opts.get("internal_user_database_enabled") + advanced_security_config["InternalUserDatabaseEnabled"] = advanced_security_opts.get( + "internal_user_database_enabled" + ) master_user_opts = advanced_security_opts.get("master_user_options") if master_user_opts is not None: advanced_security_config.setdefault("MasterUserOptions", {}) if master_user_opts.get("master_user_arn") is not None: - advanced_security_config["MasterUserOptions"][ - "MasterUserARN" - ] = master_user_opts.get("master_user_arn") + advanced_security_config["MasterUserOptions"]["MasterUserARN"] = master_user_opts.get("master_user_arn") if master_user_opts.get("master_user_name") is not None: - advanced_security_config["MasterUserOptions"][ - "MasterUserName" - ] = master_user_opts.get("master_user_name") + advanced_security_config["MasterUserOptions"]["MasterUserName"] = master_user_opts.get( + "master_user_name" + ) if master_user_opts.get("master_user_password") is not None: - advanced_security_config["MasterUserOptions"][ - "MasterUserPassword" - ] = master_user_opts.get("master_user_password") + advanced_security_config["MasterUserOptions"]["MasterUserPassword"] = master_user_opts.get( + "master_user_password" + ) saml_opts = advanced_security_opts.get("saml_options") if saml_opts is not None: if saml_opts.get("enabled") is not None: - advanced_security_config["SamlOptions"]["Enabled"] = saml_opts.get( - "enabled" - ) + advanced_security_config["SamlOptions"]["Enabled"] = saml_opts.get("enabled") idp_opts = saml_opts.get("idp") if idp_opts is not None: if idp_opts.get("metadata_content") is not None: - advanced_security_config["SamlOptions"]["Idp"][ - "MetadataContent" - ] = idp_opts.get("metadata_content") + advanced_security_config["SamlOptions"]["Idp"]["MetadataContent"] = idp_opts.get("metadata_content") if idp_opts.get("entity_id") is not None: - advanced_security_config["SamlOptions"]["Idp"][ - "EntityId" - ] = idp_opts.get("entity_id") + advanced_security_config["SamlOptions"]["Idp"]["EntityId"] = idp_opts.get("entity_id") if saml_opts.get("master_user_name") is not None: - advanced_security_config["SamlOptions"][ - "MasterUserName" - ] = saml_opts.get("master_user_name") + advanced_security_config["SamlOptions"]["MasterUserName"] = saml_opts.get("master_user_name") if saml_opts.get("master_backend_role") is not None: - advanced_security_config["SamlOptions"][ - "MasterBackendRole" - ] = saml_opts.get("master_backend_role") + advanced_security_config["SamlOptions"]["MasterBackendRole"] = saml_opts.get("master_backend_role") if saml_opts.get("subject_key") is not None: - 
advanced_security_config["SamlOptions"]["SubjectKey"] = saml_opts.get( - "subject_key" - ) + advanced_security_config["SamlOptions"]["SubjectKey"] = saml_opts.get("subject_key") if saml_opts.get("roles_key") is not None: - advanced_security_config["SamlOptions"]["RolesKey"] = saml_opts.get( - "roles_key" - ) + advanced_security_config["SamlOptions"]["RolesKey"] = saml_opts.get("roles_key") if saml_opts.get("session_timeout_minutes") is not None: - advanced_security_config["SamlOptions"][ - "SessionTimeoutMinutes" - ] = saml_opts.get("session_timeout_minutes") + advanced_security_config["SamlOptions"]["SessionTimeoutMinutes"] = saml_opts.get( + "session_timeout_minutes" + ) if ( current_domain_config is not None and current_domain_config["AdvancedSecurityOptions"] != advanced_security_config ): change_set.append( - "AdvancedSecurityOptions changed from {0} to {1}".format( - current_domain_config["AdvancedSecurityOptions"], - advanced_security_config, - ) + f"AdvancedSecurityOptions changed from {current_domain_config['AdvancedSecurityOptions']} to" + f" {advanced_security_config}" ) changed = True return changed -def set_domain_endpoint_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_domain_endpoint_options(module, current_domain_config, desired_domain_config, change_set): changed = False domain_endpoint_config = desired_domain_config["DomainEndpointOptions"] domain_endpoint_opts = module.params.get("domain_endpoint_options") if domain_endpoint_opts is None: return changed if domain_endpoint_opts.get("enforce_https") is not None: - domain_endpoint_config["EnforceHTTPS"] = domain_endpoint_opts.get( - "enforce_https" - ) + domain_endpoint_config["EnforceHTTPS"] = domain_endpoint_opts.get("enforce_https") if domain_endpoint_opts.get("tls_security_policy") is not None: - domain_endpoint_config["TLSSecurityPolicy"] = domain_endpoint_opts.get( - "tls_security_policy" - ) + domain_endpoint_config["TLSSecurityPolicy"] = domain_endpoint_opts.get("tls_security_policy") if domain_endpoint_opts.get("custom_endpoint_enabled") is not None: - domain_endpoint_config["CustomEndpointEnabled"] = domain_endpoint_opts.get( - "custom_endpoint_enabled" - ) + domain_endpoint_config["CustomEndpointEnabled"] = domain_endpoint_opts.get("custom_endpoint_enabled") if domain_endpoint_config["CustomEndpointEnabled"]: if domain_endpoint_opts.get("custom_endpoint") is not None: - domain_endpoint_config["CustomEndpoint"] = domain_endpoint_opts.get( - "custom_endpoint" - ) + domain_endpoint_config["CustomEndpoint"] = domain_endpoint_opts.get("custom_endpoint") if domain_endpoint_opts.get("custom_endpoint_certificate_arn") is not None: - domain_endpoint_config[ - "CustomEndpointCertificateArn" - ] = domain_endpoint_opts.get("custom_endpoint_certificate_arn") + domain_endpoint_config["CustomEndpointCertificateArn"] = domain_endpoint_opts.get( + "custom_endpoint_certificate_arn" + ) - if ( - current_domain_config is not None - and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config - ): + if current_domain_config is not None and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config: change_set.append( - "DomainEndpointOptions changed from {0} to {1}".format( - current_domain_config["DomainEndpointOptions"], domain_endpoint_config - ) + f"DomainEndpointOptions changed from {current_domain_config['DomainEndpointOptions']} to" + f" {domain_endpoint_config}" ) changed = True return changed -def set_auto_tune_options( - module, current_domain_config, 
desired_domain_config, change_set -): +def set_auto_tune_options(module, current_domain_config, desired_domain_config, change_set): changed = False auto_tune_config = desired_domain_config["AutoTuneOptions"] auto_tune_opts = module.params.get("auto_tune_options") @@ -1088,31 +972,20 @@ def set_auto_tune_options( if duration_opt.get("unit") is not None: schedule_entry["Duration"]["Unit"] = duration_opt.get("unit") if s.get("cron_expression_for_recurrence") is not None: - schedule_entry["CronExpressionForRecurrence"] = s.get( - "cron_expression_for_recurrence" - ) + schedule_entry["CronExpressionForRecurrence"] = s.get("cron_expression_for_recurrence") auto_tune_config["MaintenanceSchedules"].append(schedule_entry) if current_domain_config is not None: - if ( - current_domain_config["AutoTuneOptions"]["DesiredState"] - != auto_tune_config["DesiredState"] - ): + if current_domain_config["AutoTuneOptions"]["DesiredState"] != auto_tune_config["DesiredState"]: change_set.append( - "AutoTuneOptions.DesiredState changed from {0} to {1}".format( - current_domain_config["AutoTuneOptions"]["DesiredState"], - auto_tune_config["DesiredState"], - ) + "AutoTuneOptions.DesiredState changed from" + f" {current_domain_config['AutoTuneOptions']['DesiredState']} to {auto_tune_config['DesiredState']}" ) changed = True - if ( - auto_tune_config["MaintenanceSchedules"] - != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"] - ): + if auto_tune_config["MaintenanceSchedules"] != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"]: change_set.append( - "AutoTuneOptions.MaintenanceSchedules changed from {0} to {1}".format( - current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"], - auto_tune_config["MaintenanceSchedules"], - ) + "AutoTuneOptions.MaintenanceSchedules changed from" + f" {current_domain_config['AutoTuneOptions']['MaintenanceSchedules']} to" + f" {auto_tune_config['MaintenanceSchedules']}" ) changed = True return changed @@ -1127,18 +1000,12 @@ def set_access_policy(module, current_domain_config, desired_domain_config, chan try: access_policy_config = json.dumps(access_policy_opt) except Exception as e: - module.fail_json( - msg="Failed to convert the policy into valid JSON: %s" % str(e) - ) + module.fail_json(msg=f"Failed to convert the policy into valid JSON: {str(e)}") if current_domain_config is not None: # Updating existing domain current_access_policy = json.loads(current_domain_config["AccessPolicies"]) if not compare_policies(current_access_policy, access_policy_opt): - change_set.append( - "AccessPolicy changed from {0} to {1}".format( - current_access_policy, access_policy_opt - ) - ) + change_set.append(f"AccessPolicy changed from {current_access_policy} to {access_policy_opt}") changed = True desired_domain_config["AccessPolicies"] = access_policy_config else: @@ -1201,53 +1068,26 @@ def ensure_domain_present(client, module): # Validate the engine_version v = parse_version(module.params.get("engine_version")) if v is None: - module.fail_json( - "Invalid engine_version. Must be Elasticsearch_X.Y or OpenSearch_X.Y" - ) + module.fail_json("Invalid engine_version. 
Must be Elasticsearch_X.Y or OpenSearch_X.Y") desired_domain_config["EngineVersion"] = module.params.get("engine_version") changed = False change_set = [] # For check mode purpose - changed |= set_cluster_config( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_ebs_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_encryption_at_rest_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_node_to_node_encryption_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_vpc_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_snapshot_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_cognito_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_advanced_security_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_domain_endpoint_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_auto_tune_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_access_policy( - module, current_domain_config, desired_domain_config, change_set - ) + changed |= set_cluster_config(module, current_domain_config, desired_domain_config, change_set) + changed |= set_ebs_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_encryption_at_rest_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_node_to_node_encryption_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_vpc_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_snapshot_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_cognito_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_advanced_security_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_domain_endpoint_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_auto_tune_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_access_policy(module, current_domain_config, desired_domain_config, change_set) if current_domain_config is not None: - if ( - desired_domain_config["EngineVersion"] - != current_domain_config["EngineVersion"] - ): + if desired_domain_config["EngineVersion"] != current_domain_config["EngineVersion"]: changed = True change_set.append("EngineVersion changed") upgrade_domain( @@ -1271,22 +1111,16 @@ def ensure_domain_present(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws( - e, msg="Couldn't update domain {0}".format(domain_name) - ) + module.fail_json_aws(e, msg=f"Couldn't update domain {domain_name}") else: # Create new OpenSearch cluster if module.params.get("access_policies") is None: - module.fail_json( - "state is present but the following is missing: access_policies" - ) + module.fail_json("state is present but the following is missing: access_policies") changed = True if module.check_mode: - module.exit_json( - changed=True, msg="Would have created a domain if not in check mode" - ) + module.exit_json(changed=True, msg="Would have created a domain 
if not in check mode") try: response = client.create_domain(**desired_domain_config) domain = response["DomainStatus"] @@ -1295,22 +1129,16 @@ def ensure_domain_present(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws( - e, msg="Couldn't update domain {0}".format(domain_name) - ) + module.fail_json_aws(e, msg=f"Couldn't update domain {domain_name}") try: - existing_tags = boto3_tag_list_to_ansible_dict( - client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"] - ) + existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for domain %s" % domain_name) + module.fail_json_aws(e, f"Couldn't get tags for domain {domain_name}") desired_tags = module.params["tags"] purge_tags = module.params["purge_tags"] - changed |= ensure_tags( - client, module, domain_arn, existing_tags, desired_tags, purge_tags - ) + changed |= ensure_tags(client, module, domain_arn, existing_tags, desired_tags, purge_tags) if module.params.get("wait") and not module.check_mode: wait_for_domain_status(client, module, domain_name, "domain_available") @@ -1321,7 +1149,6 @@ def ensure_domain_present(client, module): def main(): - module = AnsibleAWSModule( argument_spec=dict( state=dict(choices=["present", "absent"], default="present"), @@ -1482,8 +1309,6 @@ def main(): supports_check_mode=True, ) - module.require_botocore_at_least("1.21.38") - try: client = module.client("opensearch", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: diff --git a/ansible_collections/community/aws/plugins/modules/opensearch_info.py b/ansible_collections/community/aws/plugins/modules/opensearch_info.py index 700ad26fd..98fce3e03 100644 --- a/ansible_collections/community/aws/plugins/modules/opensearch_info.py +++ b/ansible_collections/community/aws/plugins/modules/opensearch_info.py @@ -1,20 +1,18 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = """ +DOCUMENTATION = r""" --- module: opensearch_info short_description: obtain information about one or more OpenSearch or ElasticSearch domain description: - Obtain information about one Amazon OpenSearch Service domain. version_added: 4.0.0 -author: "Sebastien Rosset (@sebastien-rosset)" +author: + - "Sebastien Rosset (@sebastien-rosset)" options: domain_name: description: @@ -28,18 +26,16 @@ options: all tag key, value pairs. 
required: false type: dict -requirements: - - botocore >= 1.21.38 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 """ -EXAMPLES = ''' +EXAMPLES = r""" - name: Get information about an OpenSearch domain instance community.aws.opensearch_info: - domain-name: my-search-cluster + domain_name: my-search-cluster register: new_cluster_info - name: Get all OpenSearch instances @@ -50,9 +46,9 @@ EXAMPLES = ''' tags: Applications: search Environment: Development -''' +""" -RETURN = ''' +RETURN = r""" instances: description: List of OpenSearch domain instances returned: always @@ -441,7 +437,7 @@ instances: description: The name of the OpenSearch domain. returned: always type: str -''' +""" try: @@ -449,62 +445,63 @@ try: except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - AWSRetry, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, -) -from ansible_collections.community.aws.plugins.module_utils.opensearch import ( - get_domain_config, - get_domain_status, -) +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_config +from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_status def domain_info(client, module): - domain_name = module.params.get('domain_name') - filter_tags = module.params.get('tags') + domain_name = module.params.get("domain_name") + filter_tags = module.params.get("tags") domain_list = [] if domain_name: domain_status = get_domain_status(client, module, domain_name) if domain_status: - domain_list.append({'DomainStatus': domain_status}) + domain_list.append({"DomainStatus": domain_status}) else: - domain_summary_list = client.list_domain_names()['DomainNames'] + domain_summary_list = client.list_domain_names()["DomainNames"] for d in domain_summary_list: - domain_status = get_domain_status(client, module, d['DomainName']) + domain_status = get_domain_status(client, module, d["DomainName"]) if domain_status: - domain_list.append({'DomainStatus': domain_status}) + domain_list.append({"DomainStatus": domain_status}) # Get the domain tags for domain in domain_list: current_domain_tags = None - domain_arn = domain['DomainStatus']['ARN'] + domain_arn = domain["DomainStatus"]["ARN"] try: current_domain_tags = client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"] - domain['Tags'] = boto3_tag_list_to_ansible_dict(current_domain_tags) + domain["Tags"] = boto3_tag_list_to_ansible_dict(current_domain_tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # This could potentially happen if a domain is deleted between the time # its domain status was queried and the tags were queried. 
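# --- Small reference example (not part of the patch) ---
# boto3_tag_list_to_ansible_dict (imported above) flattens the boto3
# TagList shape into the plain dict that the tag filter below compares
# against; the tag keys and values here are invented.
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict

tag_list = [{"Key": "Applications", "Value": "search"}, {"Key": "Environment", "Value": "Development"}]
assert boto3_tag_list_to_ansible_dict(tag_list) == {"Applications": "search", "Environment": "Development"}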
- domain['Tags'] = {} + domain["Tags"] = {} # Filter by tags if filter_tags: for tag_key in filter_tags: try: - domain_list = [c for c in domain_list if ('Tags' in c) and (tag_key in c['Tags']) and (c['Tags'][tag_key] == filter_tags[tag_key])] + domain_list = [ + c + for c in domain_list + if ("Tags" in c) and (tag_key in c["Tags"]) and (c["Tags"][tag_key] == filter_tags[tag_key]) + ] except (TypeError, AttributeError) as e: module.fail_json(msg="OpenSearch tag filtering error", exception=e) # Get the domain config for idx, domain in enumerate(domain_list): - domain_name = domain['DomainStatus']['DomainName'] + domain_name = domain["DomainStatus"]["DomainName"] (domain_config, arn) = get_domain_config(client, module, domain_name) if domain_config: - domain['DomainConfig'] = domain_config - domain_list[idx] = camel_dict_to_snake_dict(domain, - ignore_list=['AdvancedOptions', 'Endpoints', 'Tags']) + domain["DomainConfig"] = domain_config + domain_list[idx] = camel_dict_to_snake_dict(domain, ignore_list=["AdvancedOptions", "Endpoints", "Tags"]) return dict(changed=False, domains=domain_list) @@ -513,11 +510,10 @@ def main(): module = AnsibleAWSModule( argument_spec=dict( domain_name=dict(required=False), - tags=dict(type='dict', required=False), + tags=dict(type="dict", required=False), ), supports_check_mode=True, ) - module.require_botocore_at_least("1.21.38") try: client = module.client("opensearch", retry_decorator=AWSRetry.jittered_backoff()) @@ -527,5 +523,5 @@ def main(): module.exit_json(**domain_info(client, module)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/redshift.py b/ansible_collections/community/aws/plugins/modules/redshift.py index 27e959893..4463722e5 100644 --- a/ansible_collections/community/aws/plugins/modules/redshift.py +++ b/ansible_collections/community/aws/plugins/modules/redshift.py @@ -1,14 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright 2014 Jens Carl, Hothead Games Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- author: - "Jens Carl (@j-carl), Hothead Games Inc." @@ -170,13 +166,13 @@ options: notes: - Support for I(tags) and I(purge_tags) was added in release 1.3.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Basic cluster provisioning example community.aws.redshift: command: create @@ -191,9 +187,9 @@ EXAMPLES = r''' identifier: new_cluster skip_final_cluster_snapshot: true wait: true -''' +""" -RETURN = r''' +RETURN = r""" cluster: description: dictionary containing all the cluster information returned: success @@ -257,31 +253,33 @@ cluster: description: aws tags for cluster. 
returned: success type: dict -''' +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_id +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def _ensure_tags(redshift, identifier, existing_tags, module): """Compares and update resource tags""" - account_id = get_aws_account_id(module) - region = module.params.get('region') - resource_arn = "arn:aws:redshift:{0}:{1}:cluster:{2}" .format(region, account_id, identifier) - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') + account_id, partition = get_aws_account_info(module) + region = module.region + resource_arn = f"arn:{partition}:redshift:{region}:{account_id}:cluster:{identifier}" + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") tags_to_add, tags_to_remove = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), tags, purge_tags) @@ -304,78 +302,77 @@ def _ensure_tags(redshift, identifier, existing_tags, module): def _collect_facts(resource): """Transform cluster information to dict.""" facts = { - 'identifier': resource['ClusterIdentifier'], - 'status': resource['ClusterStatus'], - 'username': resource['MasterUsername'], - 'db_name': resource['DBName'], - 'maintenance_window': resource['PreferredMaintenanceWindow'], - 'enhanced_vpc_routing': resource['EnhancedVpcRouting'] - + "identifier": resource["ClusterIdentifier"], + "status": resource["ClusterStatus"], + "username": resource["MasterUsername"], + "db_name": resource["DBName"], + "maintenance_window": resource["PreferredMaintenanceWindow"], + "enhanced_vpc_routing": resource["EnhancedVpcRouting"], } - for node in resource['ClusterNodes']: - if node['NodeRole'] in ('SHARED', 'LEADER'): - facts['private_ip_address'] = node['PrivateIPAddress'] - if facts['enhanced_vpc_routing'] is False: - facts['public_ip_address'] = node['PublicIPAddress'] + for node in resource["ClusterNodes"]: + if node["NodeRole"] in ("SHARED", "LEADER"): + facts["private_ip_address"] = node["PrivateIPAddress"] + if facts["enhanced_vpc_routing"] is False: + facts["public_ip_address"] = 
node["PublicIPAddress"] else: - facts['public_ip_address'] = None + facts["public_ip_address"] = None break # Some parameters are not ready instantly if you don't wait for available # cluster status - facts['create_time'] = None - facts['url'] = None - facts['port'] = None - facts['availability_zone'] = None - facts['tags'] = {} - - if resource['ClusterStatus'] != "creating": - facts['create_time'] = resource['ClusterCreateTime'] - facts['url'] = resource['Endpoint']['Address'] - facts['port'] = resource['Endpoint']['Port'] - facts['availability_zone'] = resource['AvailabilityZone'] - facts['tags'] = boto3_tag_list_to_ansible_dict(resource['Tags']) + facts["create_time"] = None + facts["url"] = None + facts["port"] = None + facts["availability_zone"] = None + facts["tags"] = {} + + if resource["ClusterStatus"] != "creating": + facts["create_time"] = resource["ClusterCreateTime"] + facts["url"] = resource["Endpoint"]["Address"] + facts["port"] = resource["Endpoint"]["Port"] + facts["availability_zone"] = resource["AvailabilityZone"] + facts["tags"] = boto3_tag_list_to_ansible_dict(resource["Tags"]) return facts @AWSRetry.jittered_backoff() def _describe_cluster(redshift, identifier): - ''' + """ Basic wrapper around describe_clusters with a retry applied - ''' - return redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] + """ + return redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0] @AWSRetry.jittered_backoff() def _create_cluster(redshift, **kwargs): - ''' + """ Basic wrapper around create_cluster with a retry applied - ''' + """ return redshift.create_cluster(**kwargs) # Simple wrapper around delete, try to avoid throwing an error if some other # operation is in progress -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidClusterState"]) def _delete_cluster(redshift, **kwargs): - ''' + """ Basic wrapper around delete_cluster with a retry applied. Explicitly catches 'InvalidClusterState' (~ Operation in progress) so that we can still delete a cluster if some kind of change operation was in progress. - ''' + """ return redshift.delete_cluster(**kwargs) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidClusterState"]) def _modify_cluster(redshift, **kwargs): - ''' + """ Basic wrapper around modify_cluster with a retry applied. 
Explicitly catches 'InvalidClusterState' (~ Operation in progress) for cases where another modification is still in progress - ''' + """ return redshift.modify_cluster(**kwargs) @@ -389,59 +386,71 @@ def create_cluster(module, redshift): Returns: """ - identifier = module.params.get('identifier') - node_type = module.params.get('node_type') - username = module.params.get('username') - password = module.params.get('password') - d_b_name = module.params.get('db_name') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - tags = module.params.get('tags') + identifier = module.params.get("identifier") + node_type = module.params.get("node_type") + username = module.params.get("username") + password = module.params.get("password") + d_b_name = module.params.get("db_name") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + tags = module.params.get("tags") changed = True # Package up the optional parameters params = {} - for p in ('cluster_type', 'cluster_security_groups', - 'vpc_security_group_ids', 'cluster_subnet_group_name', - 'availability_zone', 'preferred_maintenance_window', - 'cluster_parameter_group_name', - 'automated_snapshot_retention_period', 'port', - 'cluster_version', 'allow_version_upgrade', - 'number_of_nodes', 'publicly_accessible', 'encrypted', - 'elastic_ip', 'enhanced_vpc_routing'): + for p in ( + "cluster_type", + "cluster_security_groups", + "vpc_security_group_ids", + "cluster_subnet_group_name", + "availability_zone", + "preferred_maintenance_window", + "cluster_parameter_group_name", + "automated_snapshot_retention_period", + "port", + "cluster_version", + "allow_version_upgrade", + "number_of_nodes", + "publicly_accessible", + "encrypted", + "elastic_ip", + "enhanced_vpc_routing", + ): # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: params[p] = module.params.get(p) if d_b_name: - params['d_b_name'] = d_b_name + params["d_b_name"] = d_b_name if tags: tags = ansible_dict_to_boto3_tag_list(tags) - params['tags'] = tags + params["tags"] = tags try: _describe_cluster(redshift, identifier) changed = False - except is_boto3_error_code('ClusterNotFound'): + except is_boto3_error_code("ClusterNotFound"): try: - _create_cluster(redshift, - ClusterIdentifier=identifier, - NodeType=node_type, - MasterUsername=username, - MasterUserPassword=password, - **snake_dict_to_camel_dict(params, capitalize_first=True)) + _create_cluster( + redshift, + ClusterIdentifier=identifier, + NodeType=node_type, + MasterUsername=username, + MasterUserPassword=password, + **snake_dict_to_camel_dict(params, capitalize_first=True), + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to create cluster") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe cluster") if wait: attempts = wait_timeout // 60 - waiter = redshift.get_waiter('cluster_available') + waiter = redshift.get_waiter("cluster_available") try: - waiter.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts) - ) + waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, 
msg="Timeout waiting for the cluster creation") try: @@ -450,7 +459,7 @@ def create_cluster(module, redshift): module.fail_json_aws(e, msg="Failed to describe cluster") if tags: - if _ensure_tags(redshift, identifier, resource['Tags'], module): + if _ensure_tags(redshift, identifier, resource["Tags"], module): changed = True resource = _describe_cluster(redshift, identifier) @@ -464,7 +473,7 @@ def describe_cluster(module, redshift): module: Ansible module object redshift: authenticated redshift connection object """ - identifier = module.params.get('identifier') + identifier = module.params.get("identifier") try: resource = _describe_cluster(redshift, identifier) @@ -482,13 +491,12 @@ def delete_cluster(module, redshift): redshift: authenticated redshift connection object """ - identifier = module.params.get('identifier') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') + identifier = module.params.get("identifier") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") params = {} - for p in ('skip_final_cluster_snapshot', - 'final_cluster_snapshot_identifier'): + for p in ("skip_final_cluster_snapshot", "final_cluster_snapshot_identifier"): if p in module.params: # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: @@ -496,22 +504,21 @@ def delete_cluster(module, redshift): try: _delete_cluster( - redshift, - ClusterIdentifier=identifier, - **snake_dict_to_camel_dict(params, capitalize_first=True)) - except is_boto3_error_code('ClusterNotFound'): + redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True) + ) + except is_boto3_error_code("ClusterNotFound"): return False, {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete cluster") if wait: attempts = wait_timeout // 60 - waiter = redshift.get_waiter('cluster_deleted') + waiter = redshift.get_waiter("cluster_deleted") try: - waiter.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts) - ) + waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout deleting the cluster") @@ -526,148 +533,160 @@ def modify_cluster(module, redshift): redshift: authenticated redshift connection object """ - identifier = module.params.get('identifier') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') + identifier = module.params.get("identifier") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") # Package up the optional parameters params = {} - for p in ('cluster_type', 'cluster_security_groups', - 'vpc_security_group_ids', 'cluster_subnet_group_name', - 'availability_zone', 'preferred_maintenance_window', - 'cluster_parameter_group_name', - 'automated_snapshot_retention_period', 'port', 'cluster_version', - 'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'): + for p in ( + "cluster_type", + "cluster_security_groups", + "vpc_security_group_ids", + "cluster_subnet_group_name", + "availability_zone", + "preferred_maintenance_window", + "cluster_parameter_group_name", + "automated_snapshot_retention_period", + "port", + 
"cluster_version", + "allow_version_upgrade", + "number_of_nodes", + "new_cluster_identifier", + ): # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: params[p] = module.params.get(p) # enhanced_vpc_routing parameter change needs an exclusive request - if module.params.get('enhanced_vpc_routing') is not None: + if module.params.get("enhanced_vpc_routing") is not None: try: _modify_cluster( - redshift, - ClusterIdentifier=identifier, - EnhancedVpcRouting=module.params.get('enhanced_vpc_routing')) + redshift, ClusterIdentifier=identifier, EnhancedVpcRouting=module.params.get("enhanced_vpc_routing") + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) + module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") if wait: attempts = wait_timeout // 60 - waiter = redshift.get_waiter('cluster_available') + waiter = redshift.get_waiter("cluster_available") try: - waiter.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts)) + waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, - msg="Timeout waiting for cluster enhanced vpc routing modification") + module.fail_json_aws(e, msg="Timeout waiting for cluster enhanced vpc routing modification") # change the rest try: _modify_cluster( - redshift, - ClusterIdentifier=identifier, - **snake_dict_to_camel_dict(params, capitalize_first=True)) + redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True) + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) + module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") - if module.params.get('new_cluster_identifier'): - identifier = module.params.get('new_cluster_identifier') + if module.params.get("new_cluster_identifier"): + identifier = module.params.get("new_cluster_identifier") if wait: attempts = wait_timeout // 60 - waiter2 = redshift.get_waiter('cluster_available') + waiter2 = redshift.get_waiter("cluster_available") try: - waiter2.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts) - ) + waiter2.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout waiting for cluster modification") try: resource = _describe_cluster(redshift, identifier) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) + module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") - if _ensure_tags(redshift, identifier, resource['Tags'], module): - resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] + if _ensure_tags(redshift, identifier, resource["Tags"], module): + resource = redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0] return True, _collect_facts(resource) def main(): argument_spec = dict( - command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True), + command=dict(choices=["create", "facts", "delete", "modify"], required=True), identifier=dict(required=True), - 
node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', - 'ds2.8xlarge', 'dc1.large', 'dc2.large', - 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', - 'dw2.large', 'dw2.8xlarge'], required=False), + node_type=dict( + choices=[ + "ds1.xlarge", + "ds1.8xlarge", + "ds2.xlarge", + "ds2.8xlarge", + "dc1.large", + "dc2.large", + "dc1.8xlarge", + "dw1.xlarge", + "dw1.8xlarge", + "dw2.large", + "dw2.8xlarge", + ], + required=False, + ), username=dict(required=False), password=dict(no_log=True, required=False), db_name=dict(required=False), - cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'), - cluster_security_groups=dict(aliases=['security_groups'], type='list', elements='str'), - vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list', elements='str'), - skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], - type='bool', default=False), - final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False), - cluster_subnet_group_name=dict(aliases=['subnet']), - availability_zone=dict(aliases=['aws_zone', 'zone']), - preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']), - cluster_parameter_group_name=dict(aliases=['param_group_name']), - automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'), - port=dict(type='int'), - cluster_version=dict(aliases=['version'], choices=['1.0']), - allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True), - number_of_nodes=dict(type='int'), - publicly_accessible=dict(type='bool', default=False), - encrypted=dict(type='bool', default=False), + cluster_type=dict(choices=["multi-node", "single-node"], default="single-node"), + cluster_security_groups=dict(aliases=["security_groups"], type="list", elements="str"), + vpc_security_group_ids=dict(aliases=["vpc_security_groups"], type="list", elements="str"), + skip_final_cluster_snapshot=dict(aliases=["skip_final_snapshot"], type="bool", default=False), + final_cluster_snapshot_identifier=dict(aliases=["final_snapshot_id"], required=False), + cluster_subnet_group_name=dict(aliases=["subnet"]), + availability_zone=dict(aliases=["aws_zone", "zone"]), + preferred_maintenance_window=dict(aliases=["maintance_window", "maint_window"]), + cluster_parameter_group_name=dict(aliases=["param_group_name"]), + automated_snapshot_retention_period=dict(aliases=["retention_period"], type="int"), + port=dict(type="int"), + cluster_version=dict(aliases=["version"], choices=["1.0"]), + allow_version_upgrade=dict(aliases=["version_upgrade"], type="bool", default=True), + number_of_nodes=dict(type="int"), + publicly_accessible=dict(type="bool", default=False), + encrypted=dict(type="bool", default=False), elastic_ip=dict(required=False), - new_cluster_identifier=dict(aliases=['new_identifier']), - enhanced_vpc_routing=dict(type='bool', default=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True) + new_cluster_identifier=dict(aliases=["new_identifier"]), + enhanced_vpc_routing=dict(type="bool", default=False), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) required_if = [ - ('command', 'delete', ['skip_final_cluster_snapshot']), - ('command', 'create', ['node_type', - 'username', - 'password']) + 
("command", "delete", ["skip_final_cluster_snapshot"]), + ("command", "create", ["node_type", "username", "password"]), ] module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=required_if + required_if=required_if, ) - command = module.params.get('command') - skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot') - final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier') + command = module.params.get("command") + skip_final_cluster_snapshot = module.params.get("skip_final_cluster_snapshot") + final_cluster_snapshot_identifier = module.params.get("final_cluster_snapshot_identifier") # can't use module basic required_if check for this case - if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None: - module.fail_json(msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False") + if command == "delete" and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None: + module.fail_json( + msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False" + ) - conn = module.client('redshift') + conn = module.client("redshift") changed = True - if command == 'create': + if command == "create": (changed, cluster) = create_cluster(module, conn) - elif command == 'facts': + elif command == "facts": (changed, cluster) = describe_cluster(module, conn) - elif command == 'delete': + elif command == "delete": (changed, cluster) = delete_cluster(module, conn) - elif command == 'modify': + elif command == "modify": (changed, cluster) = modify_cluster(module, conn) module.exit_json(changed=changed, cluster=cluster) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py b/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py index 1c42ea802..d2894dfcb 100644 --- a/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py +++ b/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, JR Kerkstra <jrkerkstra@example.org> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: redshift_cross_region_snapshots version_added: 1.0.0 @@ -15,7 +12,8 @@ short_description: Manage Redshift Cross Region Snapshots description: - Manage Redshift Cross Region Snapshots. Supports KMS-Encrypted Snapshots. 
- For more information, see U(https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html#cross-region-snapshot-copy) -author: JR Kerkstra (@captainkerk) +author: + - JR Kerkstra (@captainkerk) options: cluster_name: description: @@ -54,13 +52,12 @@ options: aliases: [ "retention_period" ] type: int extends_documentation_fragment: -- amazon.aws.ec2 -- amazon.aws.aws -- amazon.aws.boto3 - -''' + - amazon.aws.region.modules + - amazon.aws.common.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: configure cross-region snapshot on cluster `johniscool` community.aws.redshift_cross_region_snapshots: cluster_name: johniscool @@ -84,24 +81,21 @@ EXAMPLES = ''' state: absent region: us-east-1 destination_region: us-west-2 -''' +""" -RETURN = ''' # ''' +RETURN = r""" # """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class SnapshotController(object): - def __init__(self, client, cluster_name): self.client = client self.cluster_name = cluster_name def get_cluster_snapshot_copy_status(self): - response = self.client.describe_clusters( - ClusterIdentifier=self.cluster_name - ) - return response['Clusters'][0].get('ClusterSnapshotCopyStatus') + response = self.client.describe_clusters(ClusterIdentifier=self.cluster_name) + return response["Clusters"][0].get("ClusterSnapshotCopyStatus") def enable_snapshot_copy(self, destination_region, grant_name, retention_period): if grant_name: @@ -119,78 +113,79 @@ class SnapshotController(object): ) def disable_snapshot_copy(self): - self.client.disable_snapshot_copy( - ClusterIdentifier=self.cluster_name - ) + self.client.disable_snapshot_copy(ClusterIdentifier=self.cluster_name) def modify_snapshot_copy_retention_period(self, retention_period): self.client.modify_snapshot_copy_retention_period( - ClusterIdentifier=self.cluster_name, - RetentionPeriod=retention_period + ClusterIdentifier=self.cluster_name, RetentionPeriod=retention_period ) def requesting_unsupported_modifications(actual, requested): - if (actual['SnapshotCopyGrantName'] != requested['snapshot_copy_grant'] or - actual['DestinationRegion'] != requested['destination_region']): + if ( + actual["SnapshotCopyGrantName"] != requested["snapshot_copy_grant"] + or actual["DestinationRegion"] != requested["destination_region"] + ): return True return False def needs_update(actual, requested): - if actual['RetentionPeriod'] != requested['snapshot_retention_period']: + if actual["RetentionPeriod"] != requested["snapshot_retention_period"]: return True return False def run_module(): argument_spec = dict( - cluster_name=dict(type='str', required=True, aliases=['cluster']), - state=dict(type='str', choices=['present', 'absent'], default='present'), - region=dict(type='str', required=True, aliases=['source']), - destination_region=dict(type='str', required=True, aliases=['destination']), - snapshot_copy_grant=dict(type='str', aliases=['copy_grant']), - snapshot_retention_period=dict(type='int', required=True, aliases=['retention_period']), + cluster_name=dict(type="str", required=True, aliases=["cluster"]), + state=dict(type="str", choices=["present", "absent"], default="present"), + region=dict(type="str", required=True, aliases=["source"]), + destination_region=dict(type="str", required=True, aliases=["destination"]), + snapshot_copy_grant=dict(type="str", aliases=["copy_grant"]), + 
snapshot_retention_period=dict(type="int", required=True, aliases=["retention_period"]),
     )

     module = AnsibleAWSModule(
         argument_spec=argument_spec,
-        supports_check_mode=True
+        supports_check_mode=True,
     )

     result = dict(
         changed=False,
-        message=''
+        message="",
     )

-    connection = module.client('redshift')
+    connection = module.client("redshift")

-    snapshot_controller = SnapshotController(client=connection,
-                                             cluster_name=module.params.get('cluster_name'))
+    snapshot_controller = SnapshotController(client=connection, cluster_name=module.params.get("cluster_name"))

     current_config = snapshot_controller.get_cluster_snapshot_copy_status()

     if current_config is not None:
-        if module.params.get('state') == 'present':
+        if module.params.get("state") == "present":
             if requesting_unsupported_modifications(current_config, module.params):
-                message = 'Cannot modify destination_region or grant_name. ' \
-                          'Please disable cross-region snapshots, and re-run.'
+                message = (
+                    "Cannot modify destination_region or grant_name. Please disable cross-region snapshots, and re-run."
+                )
                 module.fail_json(msg=message, **result)
             if needs_update(current_config, module.params):
-                result['changed'] = True
+                result["changed"] = True
                 if not module.check_mode:
                     snapshot_controller.modify_snapshot_copy_retention_period(
-                        module.params.get('snapshot_retention_period')
+                        module.params.get("snapshot_retention_period")
                     )
         else:
-            result['changed'] = True
+            result["changed"] = True
             if not module.check_mode:
                 snapshot_controller.disable_snapshot_copy()
     else:
-        if module.params.get('state') == 'present':
-            result['changed'] = True
+        if module.params.get("state") == "present":
+            result["changed"] = True
             if not module.check_mode:
-                snapshot_controller.enable_snapshot_copy(module.params.get('destination_region'),
-                                                         module.params.get('snapshot_copy_grant'),
-                                                         module.params.get('snapshot_retention_period'))
+                snapshot_controller.enable_snapshot_copy(
+                    module.params.get("destination_region"),
+                    module.params.get("snapshot_copy_grant"),
+                    module.params.get("snapshot_retention_period"),
+                )
     module.exit_json(**result)
@@ -198,5 +193,5 @@ def main():
     run_module()

-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/community/aws/plugins/modules/redshift_info.py b/ansible_collections/community/aws/plugins/modules/redshift_info.py
index ff4da774e..2a346167e 100644
--- a/ansible_collections/community/aws/plugins/modules/redshift_info.py
+++ b/ansible_collections/community/aws/plugins/modules/redshift_info.py
@@ -1,17 +1,15 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: redshift_info
 version_added: 1.0.0
-author: "Jens Carl (@j-carl)"
+author:
+  - "Jens Carl (@j-carl)"
 short_description: Gather information about Redshift cluster(s)
 description:
   - Gather information about Redshift cluster(s).
@@ -30,13 +28,12 @@ options:
     required: false
     type: dict
 extends_documentation_fragment:
-- amazon.aws.ec2
-- amazon.aws.aws
-- amazon.aws.boto3
-
-'''
+  - amazon.aws.region.modules
+  - amazon.aws.common.modules
+  - amazon.aws.boto3
+"""

-EXAMPLES = '''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS guide for details.
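The run_module() logic above keys every decision off ClusterSnapshotCopyStatus. A minimal boto3 sketch of that lookup outside Ansible, assuming credentials come from the environment and reusing the illustrative cluster name from the examples above:

import boto3

client = boto3.client("redshift")
response = client.describe_clusters(ClusterIdentifier="johniscool")
status = response["Clusters"][0].get("ClusterSnapshotCopyStatus")
# status is None while cross-region copy is disabled; otherwise it carries
# DestinationRegion, RetentionPeriod and (for KMS) SnapshotCopyGrantName --
# the fields compared by requesting_unsupported_modifications() and needs_update().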
- name: Find all clusters @@ -65,9 +62,9 @@ EXAMPLES = ''' stack: db register: redshift_user failed_when: "{{ redshift_user.results | length == 0 }}" -''' +""" -RETURN = ''' +RETURN = r""" # For more information see U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters) --- cluster_identifier: @@ -273,46 +270,46 @@ iam_roles: returned: success type: list sample: [] -''' +""" import re try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def match_tags(tags_to_match, cluster): for key, value in tags_to_match.items(): - for tag in cluster['Tags']: - if key == tag['Key'] and value == tag['Value']: + for tag in cluster["Tags"]: + if key == tag["Key"] and value == tag["Value"]: return True return False def find_clusters(conn, module, identifier=None, tags=None): - try: - cluster_paginator = conn.get_paginator('describe_clusters') + cluster_paginator = conn.get_paginator("describe_clusters") clusters = cluster_paginator.paginate().build_full_result() except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to fetch clusters.') + module.fail_json_aws(e, msg="Failed to fetch clusters.") matched_clusters = [] if identifier is not None: - identifier_prog = re.compile('^' + identifier) - - for cluster in clusters['Clusters']: + identifier_prog = re.compile("^" + identifier) + for cluster in clusters["Clusters"]: matched_identifier = True if identifier: - matched_identifier = identifier_prog.search(cluster['ClusterIdentifier']) + matched_identifier = identifier_prog.search(cluster["ClusterIdentifier"]) matched_tags = True if tags: @@ -325,24 +322,23 @@ def find_clusters(conn, module, identifier=None, tags=None): def main(): - argument_spec = dict( - cluster_identifier=dict(type='str', aliases=['identifier', 'name']), - tags=dict(type='dict') + cluster_identifier=dict(type="str", aliases=["identifier", "name"]), + tags=dict(type="dict"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) - cluster_identifier = module.params.get('cluster_identifier') - cluster_tags = module.params.get('tags') + cluster_identifier = module.params.get("cluster_identifier") + cluster_tags = module.params.get("tags") - redshift = module.client('redshift') + redshift = module.client("redshift") results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags) module.exit_json(results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py b/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py index 3c7ca31f5..2ae3a2405 100644 --- a/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py +++ b/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright 2014 Jens Carl, Hothead Games Inc. 
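The find_clusters() helper above paginates rather than calling describe_clusters once, since the API returns results in pages (100 records per call by default). A standalone sketch of the same paginator pattern, assuming region and credentials come from the environment:

import boto3

client = boto3.client("redshift")
paginator = client.get_paginator("describe_clusters")
# build_full_result() walks every page and merges the per-page lists, so
# clusters["Clusters"] contains all clusters, not just the first page.
clusters = paginator.paginate().build_full_result()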
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: redshift_subnet_group version_added: 1.0.0 @@ -40,30 +37,30 @@ options: type: list elements: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 author: - "Jens Carl (@j-carl), Hothead Games Inc." -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a Redshift subnet group community.aws.redshift_subnet_group: state: present group_name: redshift-subnet group_description: Redshift subnet group_subnets: - - 'subnet-aaaaa' - - 'subnet-bbbbb' + - 'subnet-aaaaa' + - 'subnet-bbbbb' - name: Remove subnet group community.aws.redshift_subnet_group: state: absent group_name: redshift-subnet -''' +""" -RETURN = r''' +RETURN = r""" cluster_subnet_group: description: A dictionary containing information about the Redshift subnet group. returned: success @@ -92,7 +89,7 @@ cluster_subnet_group: sample: - subnet-aaaaaaaa - subnet-bbbbbbbb -''' +""" try: import botocore @@ -101,10 +98,11 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_subnet_group(name): @@ -112,10 +110,13 @@ def get_subnet_group(name): groups = client.describe_cluster_subnet_groups( aws_retry=True, ClusterSubnetGroupName=name, - )['ClusterSubnetGroups'] - except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'): + )["ClusterSubnetGroups"] + except is_boto3_error_code("ClusterSubnetGroupNotFoundFault"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe subnet group") if not groups: @@ -129,23 +130,22 @@ def get_subnet_group(name): # No support for managing tags yet, but make sure that we don't need to # change the return value structure after it's been available in a release. 
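The normalisation performed in the next few lines can be reproduced standalone; a sketch using the same helpers, with an illustrative response shape (not captured from a live API):

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict

group = {  # shaped like one entry of DescribeClusterSubnetGroups
    "ClusterSubnetGroupName": "redshift-subnet",
    "Tags": [{"Key": "env", "Value": "test"}],
}
subnet_group = camel_dict_to_snake_dict(group)  # CamelCase keys become snake_case
subnet_group["tags"] = boto3_tag_list_to_ansible_dict(group["Tags"])  # -> {"env": "test"}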
- tags = boto3_tag_list_to_ansible_dict(groups[0]['Tags']) + tags = boto3_tag_list_to_ansible_dict(groups[0]["Tags"]) subnet_group = camel_dict_to_snake_dict(groups[0]) - subnet_group['tags'] = tags - subnet_group['name'] = subnet_group['cluster_subnet_group_name'] + subnet_group["tags"] = tags + subnet_group["name"] = subnet_group["cluster_subnet_group_name"] - subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets']) - subnet_group['subnet_ids'] = subnet_ids + subnet_ids = list(s["subnet_identifier"] for s in subnet_group["subnets"]) + subnet_group["subnet_ids"] = subnet_ids return subnet_group def create_subnet_group(name, description, subnets): - if not subnets: - module.fail_json(msg='At least one subnet must be provided when creating a subnet group') + module.fail_json(msg="At least one subnet must be provided when creating a subnet group") if module.check_mode: return True @@ -166,13 +166,13 @@ def create_subnet_group(name, description, subnets): def update_subnet_group(subnet_group, name, description, subnets): update_params = dict() - if description and subnet_group['description'] != description: - update_params['Description'] = description + if description and subnet_group["description"] != description: + update_params["Description"] = description if subnets: - old_subnets = set(subnet_group['subnet_ids']) + old_subnets = set(subnet_group["subnet_ids"]) new_subnets = set(subnets) if old_subnets != new_subnets: - update_params['SubnetIds'] = list(subnets) + update_params["SubnetIds"] = list(subnets) if not update_params: return False @@ -181,8 +181,8 @@ def update_subnet_group(subnet_group, name, description, subnets): return True # Description is optional, SubnetIds is not - if 'SubnetIds' not in update_params: - update_params['SubnetIds'] = subnet_group['subnet_ids'] + if "SubnetIds" not in update_params: + update_params["SubnetIds"] = subnet_group["subnet_ids"] try: client.modify_cluster_subnet_group( @@ -197,7 +197,6 @@ def update_subnet_group(subnet_group, name, description, subnets): def delete_subnet_group(name): - if module.check_mode: return True @@ -207,20 +206,23 @@ def delete_subnet_group(name): ClusterSubnetGroupName=name, ) return True - except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'): + except is_boto3_error_code("ClusterSubnetGroupNotFoundFault"): # AWS is "eventually consistent", cope with the race conditions where # deletion hadn't completed when we ran describe return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete subnet group") def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - name=dict(required=True, aliases=['group_name']), - description=dict(required=False, aliases=['group_description']), - subnets=dict(required=False, aliases=['group_subnets'], type='list', elements='str'), + state=dict(default="present", choices=["present", "absent"]), + name=dict(required=True, aliases=["group_name"]), + description=dict(required=False, aliases=["group_description"]), + subnets=dict(required=False, aliases=["group_subnets"], type="list", elements="str"), ) global module @@ -231,17 +233,17 @@ def main(): supports_check_mode=True, ) - state = module.params.get('state') - name = module.params.get('name') - description = 
module.params.get('description')
-    subnets = module.params.get('subnets')
+    state = module.params.get("state")
+    name = module.params.get("name")
+    description = module.params.get("description")
+    subnets = module.params.get("subnets")

-    client = module.client('redshift', retry_decorator=AWSRetry.jittered_backoff())
+    client = module.client("redshift", retry_decorator=AWSRetry.jittered_backoff())

     subnet_group = get_subnet_group(name)
     changed = False

-    if state == 'present':
+    if state == "present":
         if not subnet_group:
             result = create_subnet_group(name, description, subnets)
             changed |= result
@@ -257,9 +259,9 @@ def main():

     compat_results = dict()
     if subnet_group:
-        compat_results['group'] = dict(
-            name=subnet_group['name'],
-            vpc_id=subnet_group['vpc_id'],
+        compat_results["group"] = dict(
+            name=subnet_group["name"],
+            vpc_id=subnet_group["vpc_id"],
         )

     module.exit_json(
@@ -269,5 +271,5 @@ def main():
     )


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/community/aws/plugins/modules/route53_wait.py b/ansible_collections/community/aws/plugins/modules/route53_wait.py
new file mode 100644
index 000000000..6b72681d4
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/route53_wait.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2023, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: route53_wait
+version_added: 6.3.0
+short_description: wait for changes in Amazon's Route 53 DNS service to propagate
+description:
+  - When using M(amazon.aws.route53) with I(wait=false), this module allows you to wait for the
+    change propagation to finish at a later point in time.
+options:
+  result:
+    aliases:
+      - results
+    description:
+      - The registered result of one or more M(amazon.aws.route53) invocations.
+    required: true
+    type: dict
+  wait_timeout:
+    description:
+      - How long to wait for the changes to be replicated, in seconds.
+      - This timeout will be used for every changed result in I(result).
+    default: 300
+    type: int
+  region:
+    description:
+      - This setting is ignored by the module. It is only present to make it possible to
+        have I(region) present in the module default group.
+ type: str +author: + - Felix Fontein (@felixfontein) +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.boto3 +""" + +RETURN = r""" +# +""" + +EXAMPLES = r""" +# Example when using a single route53 invocation: + +- name: Add new.foo.com as an A record with 3 IPs + amazon.aws.route53: + state: present + zone: foo.com + record: new.foo.com + type: A + ttl: 7200 + value: + - 1.1.1.1 + - 2.2.2.2 + - 3.3.3.3 + register: module_result + +# do something else + +- name: Wait for the changes of the above route53 invocation to propagate + community.aws.route53_wait: + result: "{{ module_result }}" + +######################################################################### +# Example when using a loop over amazon.aws.route53: + +- name: Add various A records + amazon.aws.route53: + state: present + zone: foo.com + record: "{{ item.record }}" + type: A + ttl: 300 + value: "{{ item.value }}" + loop: + - record: new.foo.com + value: 1.1.1.1 + - record: foo.foo.com + value: 2.2.2.2 + - record: bar.foo.com + value: + - 3.3.3.3 + - 4.4.4.4 + register: module_results + +# do something else + +- name: Wait for the changes of the above three route53 invocations to propagate + community.aws.route53_wait: + results: "{{ module_results }}" +""" + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + +WAIT_RETRY = 5 # how many seconds to wait between propagation status polls + + +def detect_task_results(results): + if "results" in results: + # This must be the registered result of a loop of route53 tasks + for key in ("changed", "msg", "skipped"): + if key not in results: + raise ValueError(f"missing {key} key") + if not isinstance(results["results"], list): + raise ValueError("results is present, but not a list") + for index, result in enumerate(results["results"]): + if not isinstance(result, dict): + raise ValueError(f"result {index + 1} is not a dictionary") + for key in ("changed", "failed", "ansible_loop_var", "invocation"): + if key not in result: + raise ValueError(f"missing {key} key for result {index + 1}") + yield f" for result #{index + 1}", result + return + # This must be a single route53 task + for key in ("changed", "failed"): + if key not in results: + raise ValueError(f"missing {key} key") + yield "", results + + +def main(): + argument_spec = dict( + result=dict(type="dict", required=True, aliases=["results"]), + wait_timeout=dict(type="int", default=300), + region=dict(type="str"), # ignored + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + result_in = module.params["result"] + wait_timeout_in = module.params.get("wait_timeout") + + changed_results = [] + try: + for id, result in detect_task_results(result_in): + if result.get("wait_id"): + changed_results.append((id, result["wait_id"])) + except ValueError as exc: + module.fail_json( + msg=f"The value passed as result does not seem to be a registered route53 result: {to_native(exc)}" + ) + + # connect to the route53 endpoint + try: + route53 = module.client("route53") + except botocore.exceptions.HTTPClientError as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + for what, wait_id in changed_results: + try: + waiter = get_waiter(route53, "resource_record_sets_changed") 
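The resource_record_sets_changed waiter requested here is also available directly from botocore; an equivalent standalone sketch (the change ID is a placeholder standing in for the Id returned by a prior ChangeResourceRecordSets call):

import boto3

route53 = boto3.client("route53")
change_id = "C2682N5HXP0BZ4"  # placeholder: Id from a ChangeResourceRecordSets response
route53.get_waiter("resource_record_sets_changed").wait(
    Id=change_id,
    WaiterConfig=dict(Delay=5, MaxAttempts=300 // 5),  # mirrors WAIT_RETRY above
)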
+ waiter.wait( + Id=wait_id, + WaiterConfig=dict( + Delay=WAIT_RETRY, + MaxAttempts=wait_timeout_in // WAIT_RETRY, + ), + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg=f"Timeout waiting for resource records changes{what} to be applied") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to update records") + except Exception as e: + module.fail_json(msg=f"Unhandled exception. ({to_native(e)})") + + module.exit_json(changed=False) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py b/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py index 645ca6989..1045164dc 100644 --- a/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py +++ b/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py @@ -1,15 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2021, Ansible Project # (c) 2019, XLAB d.o.o <www.xlab.si> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: s3_bucket_notification version_added: 1.0.0 @@ -104,12 +100,12 @@ options: type: str default: '' extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" --- # Examples adding notification target configs to a S3 bucket - name: Setup bucket event notification to a Lambda function @@ -138,9 +134,9 @@ EXAMPLES = r''' state: absent event_name: on_file_add_or_remove bucket_name: test-bucket -''' +""" -RETURN = r''' +RETURN = r""" notification_configuration: description: dictionary of currently applied notifications returned: success @@ -158,51 +154,50 @@ notification_configuration: description: - List of current SNS notification configurations applied to the bucket. 
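The s3_bucket_notification module below implements a read-modify-write cycle on the bucket's notification configuration. A minimal boto3 sketch of that round trip, with an illustrative bucket name, that rewrites the current configuration unchanged:

import boto3

s3 = boto3.client("s3")
config = s3.get_bucket_notification_configuration(Bucket="test-bucket")
# Keep only the three per-target lists the module manages and write them back.
targets = ("QueueConfigurations", "TopicConfigurations", "LambdaFunctionConfigurations")
s3.put_bucket_notification_configuration(
    Bucket="test-bucket",
    NotificationConfiguration={key: config[key] for key in targets if key in config},
)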
type: list -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +""" try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # will be protected by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class AmazonBucket: def __init__(self, module, client): self.module = module self.client = client - self.bucket_name = module.params['bucket_name'] + self.bucket_name = module.params["bucket_name"] self.check_mode = module.check_mode self._full_config_cache = None def full_config(self): if self._full_config_cache is None: self._full_config_cache = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] + QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[] ) try: - config_lookup = self.client.get_bucket_notification_configuration( - Bucket=self.bucket_name) + config_lookup = self.client.get_bucket_notification_configuration(Bucket=self.bucket_name) except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg='{0}'.format(e)) + self.module.fail_json(msg=f"{e}") # Handle different event targets - if config_lookup.get('QueueConfigurations'): - for queue_config in config_lookup.get('QueueConfigurations'): - self._full_config_cache['QueueConfigurations'].append(Config.from_api(queue_config)) + if config_lookup.get("QueueConfigurations"): + for queue_config in config_lookup.get("QueueConfigurations"): + self._full_config_cache["QueueConfigurations"].append(Config.from_api(queue_config)) - if config_lookup.get('TopicConfigurations'): - for topic_config in config_lookup.get('TopicConfigurations'): - self._full_config_cache['TopicConfigurations'].append(Config.from_api(topic_config)) + if config_lookup.get("TopicConfigurations"): + for topic_config in config_lookup.get("TopicConfigurations"): + self._full_config_cache["TopicConfigurations"].append(Config.from_api(topic_config)) - if config_lookup.get('LambdaFunctionConfigurations'): - for function_config in config_lookup.get('LambdaFunctionConfigurations'): - self._full_config_cache['LambdaFunctionConfigurations'].append(Config.from_api(function_config)) + if config_lookup.get("LambdaFunctionConfigurations"): + for function_config in config_lookup.get("LambdaFunctionConfigurations"): + self._full_config_cache["LambdaFunctionConfigurations"].append(Config.from_api(function_config)) return self._full_config_cache @@ -210,70 +205,59 @@ class AmazonBucket: # Iterate through configs and get current event config for target_configs in self.full_config(): for config in self.full_config()[target_configs]: - if config.raw['Id'] == config_name: + if config.raw["Id"] == config_name: return config def apply_config(self, desired): - configs = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] - ) + configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) # Iterate through existing configs then add the desired config for target_configs in self.full_config(): for config in self.full_config()[target_configs]: - if config.name != desired.raw['Id']: + if config.name != desired.raw["Id"]: 
configs[target_configs].append(config.raw) - if self.module.params.get('queue_arn'): - configs['QueueConfigurations'].append(desired.raw) - if self.module.params.get('topic_arn'): - configs['TopicConfigurations'].append(desired.raw) - if self.module.params.get('lambda_function_arn'): - configs['LambdaFunctionConfigurations'].append(desired.raw) + if self.module.params.get("queue_arn"): + configs["QueueConfigurations"].append(desired.raw) + if self.module.params.get("topic_arn"): + configs["TopicConfigurations"].append(desired.raw) + if self.module.params.get("lambda_function_arn"): + configs["LambdaFunctionConfigurations"].append(desired.raw) self._upload_bucket_config(configs) return configs def delete_config(self, desired): - configs = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] - ) + configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) # Iterate through existing configs omitting specified config for target_configs in self.full_config(): for config in self.full_config()[target_configs]: - if config.name != desired.raw['Id']: + if config.name != desired.raw["Id"]: configs[target_configs].append(config.raw) self._upload_bucket_config(configs) return configs def _upload_bucket_config(self, configs): - api_params = dict( - Bucket=self.bucket_name, - NotificationConfiguration=dict() - ) + api_params = dict(Bucket=self.bucket_name, NotificationConfiguration=dict()) # Iterate through available configs for target_configs in configs: if len(configs[target_configs]) > 0: - api_params['NotificationConfiguration'][target_configs] = configs[target_configs] + api_params["NotificationConfiguration"][target_configs] = configs[target_configs] if not self.check_mode: try: self.client.put_bucket_notification_configuration(**api_params) except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg='{0}'.format(e)) + self.module.fail_json(msg=f"{e}") class Config: def __init__(self, content): self._content = content - self.name = content.get('Id') + self.name = content.get("Id") @property def raw(self): @@ -289,41 +273,35 @@ class Config: """Generate bucket notification params for target""" bucket_event_params = dict( - Id=params['event_name'], - Events=sorted(params['events']), + Id=params["event_name"], + Events=sorted(params["events"]), Filter=dict( Key=dict( FilterRules=[ - dict( - Name='Prefix', - Value=params['prefix'] - ), - dict( - Name='Suffix', - Value=params['suffix'] - ) + dict(Name="Prefix", Value=params["prefix"]), + dict(Name="Suffix", Value=params["suffix"]), ] ) - ) + ), ) # Handle different event targets - if params.get('queue_arn'): - bucket_event_params['QueueArn'] = params['queue_arn'] - if params.get('topic_arn'): - bucket_event_params['TopicArn'] = params['topic_arn'] - if params.get('lambda_function_arn'): - function_arn = params['lambda_function_arn'] + if params.get("queue_arn"): + bucket_event_params["QueueArn"] = params["queue_arn"] + if params.get("topic_arn"): + bucket_event_params["TopicArn"] = params["topic_arn"] + if params.get("lambda_function_arn"): + function_arn = params["lambda_function_arn"] qualifier = None - if params['lambda_version'] > 0: - qualifier = str(params['lambda_version']) - elif params['lambda_alias']: - qualifier = str(params['lambda_alias']) + if params["lambda_version"] > 0: + qualifier = str(params["lambda_version"]) + elif params["lambda_alias"]: + qualifier = str(params["lambda_alias"]) if qualifier: - params['lambda_function_arn'] = 
'{0}:{1}'.format(function_arn, qualifier) + params["lambda_function_arn"] = f"{function_arn}:{qualifier}" - bucket_event_params['LambdaFunctionArn'] = params['lambda_function_arn'] + bucket_event_params["LambdaFunctionArn"] = params["lambda_function_arn"] return cls(bucket_event_params) @@ -333,66 +311,70 @@ class Config: def setup_module_object(): - event_types = ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post', - 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload', - 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete', - 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post', - 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject'] + event_types = [ + "s3:ObjectCreated:*", + "s3:ObjectCreated:Put", + "s3:ObjectCreated:Post", + "s3:ObjectCreated:Copy", + "s3:ObjectCreated:CompleteMultipartUpload", + "s3:ObjectRemoved:*", + "s3:ObjectRemoved:Delete", + "s3:ObjectRemoved:DeleteMarkerCreated", + "s3:ObjectRestore:Post", + "s3:ObjectRestore:Completed", + "s3:ReducedRedundancyLostObject", + ] argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), event_name=dict(required=True), - lambda_function_arn=dict(aliases=['function_arn']), - queue_arn=dict(type='str'), - topic_arn=dict(type='str'), + lambda_function_arn=dict(aliases=["function_arn"]), + queue_arn=dict(type="str"), + topic_arn=dict(type="str"), bucket_name=dict(required=True), - events=dict(type='list', default=[], choices=event_types, elements='str'), - prefix=dict(default=''), - suffix=dict(default=''), + events=dict(type="list", default=[], choices=event_types, elements="str"), + prefix=dict(default=""), + suffix=dict(default=""), lambda_alias=dict(), - lambda_version=dict(type='int', default=0), + lambda_version=dict(type="int", default=0), ) mutually_exclusive = [ - ['queue_arn', 'topic_arn', 'lambda_function_arn'], - ['lambda_alias', 'lambda_version'] + ["queue_arn", "topic_arn", "lambda_function_arn"], + ["lambda_alias", "lambda_version"], ] return AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive, - required_if=[['state', 'present', ['events']]] + required_if=[["state", "present", ["events"]]], ) def main(): module = setup_module_object() - client = module.client('s3') + client = module.client("s3") bucket = AmazonBucket(module, client) - current = bucket.current_config(module.params['event_name']) + current = bucket.current_config(module.params["event_name"]) desired = Config.from_params(**module.params) - notification_configs = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] - ) + notification_configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) for target_configs in bucket.full_config(): for cfg in bucket.full_config()[target_configs]: notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg.raw)) - state = module.params['state'] + state = module.params["state"] updated_configuration = dict() changed = False - if state == 'present': + if state == "present": if current != desired: updated_configuration = bucket.apply_config(desired) changed = True - elif state == 'absent': + elif state == "absent": if current: updated_configuration = bucket.delete_config(desired) changed = True @@ -402,9 +384,8 @@ def main(): for cfg in updated_configuration.get(target_configs, list()): 
notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg)) - module.exit_json(changed=changed, notification_configuration=camel_dict_to_snake_dict( - notification_configs)) + module.exit_json(changed=changed, notification_configuration=camel_dict_to_snake_dict(notification_configs)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/s3_cors.py b/ansible_collections/community/aws/plugins/modules/s3_cors.py index 753e395f9..d153c7df8 100644 --- a/ansible_collections/community/aws/plugins/modules/s3_cors.py +++ b/ansible_collections/community/aws/plugins/modules/s3_cors.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: s3_cors version_added: 1.0.0 @@ -36,12 +33,12 @@ options: choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create a simple cors for s3 bucket @@ -65,9 +62,9 @@ EXAMPLES = r''' - community.aws.s3_cors: name: mys3bucket state: absent -''' +""" -RETURN = r''' +RETURN = r""" changed: description: check to see if a change was made to the rules returned: always @@ -96,25 +93,28 @@ rules: "max_age_seconds": 30000 } ] -''' +""" try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies -def create_or_update_bucket_cors(connection, module): +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + +def create_or_update_bucket_cors(connection, module): name = module.params.get("name") rules = module.params.get("rules", []) changed = False try: - current_camel_rules = connection.get_bucket_cors(Bucket=name)['CORSRules'] + current_camel_rules = connection.get_bucket_cors(Bucket=name)["CORSRules"] except ClientError: current_camel_rules = [] @@ -125,15 +125,14 @@ def create_or_update_bucket_cors(connection, module): if changed: try: - cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={'CORSRules': new_camel_rules}) + cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={"CORSRules": new_camel_rules}) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to update CORS for bucket {name}") module.exit_json(changed=changed, name=name, rules=rules) def destroy_bucket_cors(connection, module): - name = module.params.get("name") changed = False @@ -141,30 +140,29 @@ def destroy_bucket_cors(connection, module): cors = 
connection.delete_bucket_cors(Bucket=name)
         changed = True
     except (BotoCoreError, ClientError) as e:
-        module.fail_json_aws(e, msg="Unable to delete CORS for bucket {0}".format(name))
+        module.fail_json_aws(e, msg=f"Unable to delete CORS for bucket {name}")

     module.exit_json(changed=changed)


 def main():
-
     argument_spec = dict(
-        name=dict(required=True, type='str'),
-        rules=dict(type='list', elements='dict'),
-        state=dict(type='str', choices=['present', 'absent'], required=True)
+        name=dict(required=True, type="str"),
+        rules=dict(type="list", elements="dict"),
+        state=dict(type="str", choices=["present", "absent"], required=True),
     )

     module = AnsibleAWSModule(argument_spec=argument_spec)

-    client = module.client('s3')
+    client = module.client("s3")

     state = module.params.get("state")

-    if state == 'present':
+    if state == "present":
         create_or_update_bucket_cors(client, module)
-    elif state == 'absent':
+    elif state == "absent":
         destroy_bucket_cors(client, module)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py b/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py
index 660bca869..2f48e06d4 100644
--- a/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py
+++ b/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py
@@ -1,19 +1,18 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: s3_lifecycle
 version_added: 1.0.0
 short_description: Manage S3 bucket lifecycle rules in AWS
 description:
-  - Manage S3 bucket lifecycle rules in AWS.
-author: "Rob White (@wimnat)"
+  - Manage S3 bucket lifecycle rules in AWS.
+author:
+  - "Rob White (@wimnat)"
 notes:
   - If specifying expiration time as days then transition time must also be specified in days.
   - If specifying expiration time as a date then transition time must also be specified as a date.
@@ -69,7 +68,6 @@ options:
   noncurrent_version_keep_newer:
     description:
       - The minimum number of non-current versions to retain.
-      - Requires C(botocore >= 1.23.12)
       - Requires I(noncurrent_version_expiration_days).
     required: false
     type: int
@@ -149,13 +147,14 @@ options:
     type: bool
     default: false
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""

-'''
+RETURN = r""" # """

-EXAMPLES = r'''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
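In the s3_cors module above, change detection is delegated to compare_policies(), imported from amazon.aws. A minimal sketch of how it is typically used, with illustrative rule documents:

from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies

current_rules = [{"AllowedMethods": ["GET"], "AllowedOrigins": ["*"]}]
desired_rules = [{"AllowedMethods": ["GET", "PUT"], "AllowedOrigins": ["*"]}]
# compare_policies() performs a structural comparison that ignores ordering
# differences and returns True when the two documents differ.
if compare_policies(current_rules, desired_rules):
    print("bucket CORS rules need updating")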
- name: Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days @@ -219,14 +218,15 @@ EXAMPLES = r''' storage_class: standard_ia - transition_days: 90 storage_class: glacier -''' +""" -from copy import deepcopy import datetime import time +from copy import deepcopy try: from dateutil import parser as date_parser + HAS_DATEUTIL = True except ImportError: HAS_DATEUTIL = False @@ -236,11 +236,12 @@ try: except ImportError: pass # handled by AnsibleAwsModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def parse_date(date): @@ -260,10 +261,13 @@ def fetch_rules(client, module, name): # Get the bucket's current lifecycle rules try: current_lifecycle = client.get_bucket_lifecycle_configuration(aws_retry=True, Bucket=name) - current_lifecycle_rules = normalize_boto3_result(current_lifecycle['Rules']) - except is_boto3_error_code('NoSuchLifecycleConfiguration'): + current_lifecycle_rules = normalize_boto3_result(current_lifecycle["Rules"]) + except is_boto3_error_code("NoSuchLifecycleConfiguration"): current_lifecycle_rules = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) return current_lifecycle_rules @@ -290,35 +294,37 @@ def build_rule(client, module): rule = dict(Filter=dict(Prefix=prefix), Status=status.title()) if rule_id is not None: - rule['ID'] = rule_id + rule["ID"] = rule_id if abort_incomplete_multipart_upload_days: - rule['AbortIncompleteMultipartUpload'] = { - 'DaysAfterInitiation': abort_incomplete_multipart_upload_days - } + rule["AbortIncompleteMultipartUpload"] = {"DaysAfterInitiation": abort_incomplete_multipart_upload_days} # Create expiration if expiration_days is not None: - rule['Expiration'] = dict(Days=expiration_days) + rule["Expiration"] = dict(Days=expiration_days) elif expiration_date is not None: - rule['Expiration'] = dict(Date=expiration_date.isoformat()) + rule["Expiration"] = dict(Date=expiration_date.isoformat()) elif expire_object_delete_marker is not None: - rule['Expiration'] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker) + rule["Expiration"] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker) if noncurrent_version_expiration_days or noncurrent_version_keep_newer: - rule['NoncurrentVersionExpiration'] = dict() + rule["NoncurrentVersionExpiration"] = dict() if noncurrent_version_expiration_days is not None: - rule['NoncurrentVersionExpiration']['NoncurrentDays'] = 
noncurrent_version_expiration_days + rule["NoncurrentVersionExpiration"]["NoncurrentDays"] = noncurrent_version_expiration_days if noncurrent_version_keep_newer is not None: - rule['NoncurrentVersionExpiration']['NewerNoncurrentVersions'] = noncurrent_version_keep_newer + rule["NoncurrentVersionExpiration"]["NewerNoncurrentVersions"] = noncurrent_version_keep_newer if transition_days is not None: - rule['Transitions'] = [dict(Days=transition_days, StorageClass=storage_class.upper()), ] + rule["Transitions"] = [ + dict(Days=transition_days, StorageClass=storage_class.upper()), + ] elif transition_date is not None: - rule['Transitions'] = [dict(Date=transition_date.isoformat(), StorageClass=storage_class.upper()), ] + rule["Transitions"] = [ + dict(Date=transition_date.isoformat(), StorageClass=storage_class.upper()), + ] if transitions is not None: - if not rule.get('Transitions'): - rule['Transitions'] = [] + if not rule.get("Transitions"): + rule["Transitions"] = [] for transition in transitions: t_out = dict() if transition.get("transition_date"): @@ -330,18 +336,21 @@ def build_rule(client, module): rule["Transitions"].append(t_out) if noncurrent_version_transition_days is not None: - rule['NoncurrentVersionTransitions'] = [dict(NoncurrentDays=noncurrent_version_transition_days, - StorageClass=noncurrent_version_storage_class.upper()), ] + rule["NoncurrentVersionTransitions"] = [ + dict( + NoncurrentDays=noncurrent_version_transition_days, StorageClass=noncurrent_version_storage_class.upper() + ), + ] if noncurrent_version_transitions is not None: - if not rule.get('NoncurrentVersionTransitions'): - rule['NoncurrentVersionTransitions'] = [] + if not rule.get("NoncurrentVersionTransitions"): + rule["NoncurrentVersionTransitions"] = [] for noncurrent_version_transition in noncurrent_version_transitions: t_out = dict() - t_out['NoncurrentDays'] = noncurrent_version_transition['transition_days'] - if noncurrent_version_transition.get('storage_class'): - t_out['StorageClass'] = noncurrent_version_transition['storage_class'].upper() - rule['NoncurrentVersionTransitions'].append(t_out) + t_out["NoncurrentDays"] = noncurrent_version_transition["transition_days"] + if noncurrent_version_transition.get("storage_class"): + t_out["StorageClass"] = noncurrent_version_transition["storage_class"].upper() + rule["NoncurrentVersionTransitions"].append(t_out) return rule @@ -358,23 +367,29 @@ def compare_and_update_configuration(client, module, current_lifecycle_rules, ru if current_lifecycle_rules: # If rule ID exists, use that for comparison otherwise compare based on prefix for existing_rule in current_lifecycle_rules: - if rule.get('ID') == existing_rule.get('ID') and rule['Filter'].get('Prefix', '') != existing_rule.get('Filter', {}).get('Prefix', ''): - existing_rule.pop('ID') - elif rule_id is None and rule['Filter'].get('Prefix', '') == existing_rule.get('Filter', {}).get('Prefix', ''): - existing_rule.pop('ID') - if rule.get('ID') == existing_rule.get('ID'): - changed_, appended_ = update_or_append_rule(rule, existing_rule, purge_transitions, lifecycle_configuration) + if rule.get("ID") == existing_rule.get("ID") and rule["Filter"].get("Prefix", "") != existing_rule.get( + "Filter", {} + ).get("Prefix", ""): + existing_rule.pop("ID") + elif rule_id is None and rule["Filter"].get("Prefix", "") == existing_rule.get("Filter", {}).get( + "Prefix", "" + ): + existing_rule.pop("ID") + if rule.get("ID") == existing_rule.get("ID"): + changed_, appended_ = update_or_append_rule( + rule, existing_rule, 
purge_transitions, lifecycle_configuration + ) changed = changed_ or changed appended = appended_ or appended else: - lifecycle_configuration['Rules'].append(existing_rule) + lifecycle_configuration["Rules"].append(existing_rule) # If nothing appended then append now as the rule must not exist if not appended: - lifecycle_configuration['Rules'].append(rule) + lifecycle_configuration["Rules"].append(rule) changed = True else: - lifecycle_configuration['Rules'].append(rule) + lifecycle_configuration["Rules"].append(rule) changed = True return changed, lifecycle_configuration @@ -382,24 +397,24 @@ def compare_and_update_configuration(client, module, current_lifecycle_rules, ru def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_obj): changed = False - if existing_rule['Status'] != new_rule['Status']: - if not new_rule.get('Transitions') and existing_rule.get('Transitions'): - new_rule['Transitions'] = existing_rule['Transitions'] - if not new_rule.get('Expiration') and existing_rule.get('Expiration'): - new_rule['Expiration'] = existing_rule['Expiration'] - if not new_rule.get('NoncurrentVersionExpiration') and existing_rule.get('NoncurrentVersionExpiration'): - new_rule['NoncurrentVersionExpiration'] = existing_rule['NoncurrentVersionExpiration'] - lifecycle_obj['Rules'].append(new_rule) + if existing_rule["Status"] != new_rule["Status"]: + if not new_rule.get("Transitions") and existing_rule.get("Transitions"): + new_rule["Transitions"] = existing_rule["Transitions"] + if not new_rule.get("Expiration") and existing_rule.get("Expiration"): + new_rule["Expiration"] = existing_rule["Expiration"] + if not new_rule.get("NoncurrentVersionExpiration") and existing_rule.get("NoncurrentVersionExpiration"): + new_rule["NoncurrentVersionExpiration"] = existing_rule["NoncurrentVersionExpiration"] + lifecycle_obj["Rules"].append(new_rule) changed = True appended = True else: if not purge_transitions: merge_transitions(new_rule, existing_rule) if compare_rule(new_rule, existing_rule, purge_transitions): - lifecycle_obj['Rules'].append(new_rule) + lifecycle_obj["Rules"].append(new_rule) appended = True else: - lifecycle_obj['Rules'].append(new_rule) + lifecycle_obj["Rules"].append(new_rule) changed = True appended = True return changed, appended @@ -413,24 +428,23 @@ def compare_and_remove_rule(current_lifecycle_rules, rule_id=None, prefix=None): # If an ID exists, use that otherwise compare based on prefix if rule_id is not None: for existing_rule in current_lifecycle_rules: - if rule_id == existing_rule['ID']: + if rule_id == existing_rule["ID"]: # We're not keeping the rule (i.e. deleting) so mark as changed changed = True else: - lifecycle_configuration['Rules'].append(existing_rule) + lifecycle_configuration["Rules"].append(existing_rule) else: for existing_rule in current_lifecycle_rules: - if prefix == existing_rule['Filter'].get('Prefix', ''): + if prefix == existing_rule["Filter"].get("Prefix", ""): # We're not keeping the rule (i.e. 
deleting) so mark as changed changed = True else: - lifecycle_configuration['Rules'].append(existing_rule) + lifecycle_configuration["Rules"].append(existing_rule) return changed, lifecycle_configuration def compare_rule(new_rule, old_rule, purge_transitions): - # Copy objects rule1 = deepcopy(new_rule) rule2 = deepcopy(old_rule) @@ -438,10 +452,10 @@ def compare_rule(new_rule, old_rule, purge_transitions): if purge_transitions: return rule1 == rule2 else: - transitions1 = rule1.pop('Transitions', []) - transitions2 = rule2.pop('Transitions', []) - noncurrent_transtions1 = rule1.pop('NoncurrentVersionTransitions', []) - noncurrent_transtions2 = rule2.pop('NoncurrentVersionTransitions', []) + transitions1 = rule1.pop("Transitions", []) + transitions2 = rule2.pop("Transitions", []) + noncurrent_transtions1 = rule1.pop("NoncurrentVersionTransitions", []) + noncurrent_transtions2 = rule2.pop("NoncurrentVersionTransitions", []) if rule1 != rule2: return False for transition in transitions1: @@ -459,39 +473,39 @@ def merge_transitions(updated_rule, updating_rule): # in updating_rule to updated_rule updated_transitions = {} updating_transitions = {} - for transition in updated_rule.get('Transitions', []): - updated_transitions[transition['StorageClass']] = transition - for transition in updating_rule.get('Transitions', []): - updating_transitions[transition['StorageClass']] = transition + for transition in updated_rule.get("Transitions", []): + updated_transitions[transition["StorageClass"]] = transition + for transition in updating_rule.get("Transitions", []): + updating_transitions[transition["StorageClass"]] = transition for storage_class, transition in updating_transitions.items(): if updated_transitions.get(storage_class) is None: - updated_rule['Transitions'].append(transition) + updated_rule["Transitions"].append(transition) def create_lifecycle_rule(client, module): - name = module.params.get("name") wait = module.params.get("wait") changed = False old_lifecycle_rules = fetch_rules(client, module, name) new_rule = build_rule(client, module) - (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, - old_lifecycle_rules, - new_rule) + (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule) if changed: # Write lifecycle to bucket try: client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_configuration, + aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_configuration ) except is_boto3_error_message("At least one action needs to be specified in a rule"): # Amazon interpreted this as not changing anything changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules + ) _changed = changed _retries = 10 @@ -504,9 +518,7 @@ def create_lifecycle_rule(client, module): time.sleep(5) _retries -= 1 new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, - new_rules, - new_rule) + (_changed, lifecycle_configuration) = 
compare_and_update_configuration(client, module, new_rules, new_rule) if not _changed: _not_changed_cnt -= 1 _changed = True @@ -517,13 +529,17 @@ def create_lifecycle_rule(client, module): new_rules = fetch_rules(client, module, name) - module.exit_json(changed=changed, new_rule=new_rule, rules=new_rules, - old_rules=old_lifecycle_rules, _retries=_retries, - _config=lifecycle_configuration) + module.exit_json( + changed=changed, + new_rule=new_rule, + rules=new_rules, + old_rules=old_lifecycle_rules, + _retries=_retries, + _config=lifecycle_configuration, + ) def destroy_lifecycle_rule(client, module): - name = module.params.get("name") prefix = module.params.get("prefix") rule_id = module.params.get("rule_id") @@ -539,11 +555,10 @@ def destroy_lifecycle_rule(client, module): if changed: # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration try: - if lifecycle_obj['Rules']: + if lifecycle_obj["Rules"]: client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_obj) + aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_obj + ) elif current_lifecycle_rules: changed = True client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) @@ -572,33 +587,32 @@ def destroy_lifecycle_rule(client, module): new_rules = fetch_rules(client, module, name) - module.exit_json(changed=changed, rules=new_rules, old_rules=current_lifecycle_rules, - _retries=_retries) + module.exit_json(changed=changed, rules=new_rules, old_rules=current_lifecycle_rules, _retries=_retries) def main(): - s3_storage_class = ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] + s3_storage_class = ["glacier", "onezone_ia", "standard_ia", "intelligent_tiering", "deep_archive"] argument_spec = dict( - name=dict(required=True, type='str'), - abort_incomplete_multipart_upload_days=dict(type='int'), - expiration_days=dict(type='int'), + name=dict(required=True, type="str"), + abort_incomplete_multipart_upload_days=dict(type="int"), + expiration_days=dict(type="int"), expiration_date=dict(), - expire_object_delete_marker=dict(type='bool'), - noncurrent_version_expiration_days=dict(type='int'), - noncurrent_version_keep_newer=dict(type='int'), - noncurrent_version_storage_class=dict(default='glacier', type='str', choices=s3_storage_class), - noncurrent_version_transition_days=dict(type='int'), - noncurrent_version_transitions=dict(type='list', elements='dict'), + expire_object_delete_marker=dict(type="bool"), + noncurrent_version_expiration_days=dict(type="int"), + noncurrent_version_keep_newer=dict(type="int"), + noncurrent_version_storage_class=dict(default="glacier", type="str", choices=s3_storage_class), + noncurrent_version_transition_days=dict(type="int"), + noncurrent_version_transitions=dict(type="list", elements="dict"), prefix=dict(), rule_id=dict(), - state=dict(default='present', choices=['present', 'absent']), - status=dict(default='enabled', choices=['enabled', 'disabled']), - storage_class=dict(default='glacier', type='str', choices=s3_storage_class), - transition_days=dict(type='int'), + state=dict(default="present", choices=["present", "absent"]), + status=dict(default="enabled", choices=["enabled", "disabled"]), + storage_class=dict(default="glacier", type="str", choices=s3_storage_class), + transition_days=dict(type="int"), transition_date=dict(), - transitions=dict(type='list', elements='dict'), - purge_transitions=dict(default=True, type='bool'), - wait=dict(type='bool', default=False) + 
transitions=dict(type="list", elements="dict"), + purge_transitions=dict(default=True, type="bool"), + wait=dict(type="bool", default=False), ) module = AnsibleAWSModule( @@ -617,51 +631,54 @@ def main(): }, ) - client = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("s3", retry_decorator=AWSRetry.jittered_backoff()) expiration_date = module.params.get("expiration_date") transition_date = module.params.get("transition_date") state = module.params.get("state") - if module.params.get("noncurrent_version_keep_newer"): - module.require_botocore_at_least( - "1.23.12", - reason="to set number of versions to keep with noncurrent_version_keep_newer" + if state == "present" and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix + required_when_present = ( + "abort_incomplete_multipart_upload_days", + "expiration_date", + "expiration_days", + "expire_object_delete_marker", + "transition_date", + "transition_days", + "transitions", + "noncurrent_version_expiration_days", + "noncurrent_version_keep_newer", + "noncurrent_version_transition_days", + "noncurrent_version_transitions", ) - - if state == 'present' and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix - - required_when_present = ('abort_incomplete_multipart_upload_days', - 'expiration_date', 'expiration_days', 'expire_object_delete_marker', - 'transition_date', 'transition_days', 'transitions', - 'noncurrent_version_expiration_days', - 'noncurrent_version_keep_newer', - 'noncurrent_version_transition_days', - 'noncurrent_version_transitions') for param in required_when_present: if module.params.get(param) is None: break else: - msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present) + msg = f"one of the following is required when 'state' is 'present': {', '.join(required_when_present)}" module.fail_json(msg=msg) # If dates have been set, make sure they're in a valid format if expiration_date: expiration_date = parse_date(expiration_date) if expiration_date is None: - module.fail_json(msg="expiration_date is not a valid ISO-8601 format." - " The time must be midnight and a timezone of GMT must be included") + module.fail_json( + msg="expiration_date is not a valid ISO-8601 format." + " The time must be midnight and a timezone of GMT must be included" + ) if transition_date: transition_date = parse_date(transition_date) if transition_date is None: - module.fail_json(msg="transition_date is not a valid ISO-8601 format." - " The time must be midnight and a timezone of GMT must be included") + module.fail_json( + msg="transition_date is not a valid ISO-8601 format." 
+ " The time must be midnight and a timezone of GMT must be included" + ) - if state == 'present': + if state == "present": create_lifecycle_rule(client, module) - elif state == 'absent': + elif state == "absent": destroy_lifecycle_rule(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/s3_logging.py b/ansible_collections/community/aws/plugins/modules/s3_logging.py index 011baa951..3a7874994 100644 --- a/ansible_collections/community/aws/plugins/modules/s3_logging.py +++ b/ansible_collections/community/aws/plugins/modules/s3_logging.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: s3_logging version_added: 1.0.0 short_description: Manage logging facility of an s3 bucket in AWS description: - - Manage logging facility of an s3 bucket in AWS -author: Rob White (@wimnat) + - Manage logging facility of an s3 bucket in AWS +author: + - Rob White (@wimnat) options: name: description: @@ -36,13 +35,14 @@ options: default: "" type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' +RETURN = r""" # """ -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs @@ -56,32 +56,31 @@ EXAMPLES = ''' community.aws.s3_logging: name: mywebsite.com state: absent - -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -def compare_bucket_logging(bucket_logging, target_bucket, target_prefix): - if not bucket_logging.get('LoggingEnabled', False): +def compare_bucket_logging(bucket_logging, target_bucket, target_prefix): + if not bucket_logging.get("LoggingEnabled", False): if target_bucket: return True return False - logging = bucket_logging['LoggingEnabled'] - if logging['TargetBucket'] != target_bucket: + logging = bucket_logging["LoggingEnabled"] + if logging["TargetBucket"] != target_bucket: return True - if logging['TargetPrefix'] != target_prefix: + if logging["TargetPrefix"] != target_prefix: return True return False @@ -89,18 +88,18 @@ def compare_bucket_logging(bucket_logging, target_bucket, target_prefix): def verify_acls(connection, module, target_bucket): try: current_acl = connection.get_bucket_acl(aws_retry=True, Bucket=target_bucket) - current_grants = current_acl['Grants'] - except 
is_boto3_error_code('NoSuchBucket'): - module.fail_json(msg="Target Bucket '{0}' not found".format(target_bucket)) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + current_grants = current_acl["Grants"] + except is_boto3_error_code("NoSuchBucket"): + module.fail_json(msg=f"Target Bucket '{target_bucket}' not found") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to fetch target bucket ACL") required_grant = { - 'Grantee': { - 'URI': "http://acs.amazonaws.com/groups/s3/LogDelivery", - 'Type': 'Group' - }, - 'Permission': 'FULL_CONTROL' + "Grantee": {"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", "Type": "Group"}, + "Permission": "FULL_CONTROL", } for grant in current_grants: @@ -113,8 +112,8 @@ def verify_acls(connection, module, target_bucket): updated_acl = dict(current_acl) updated_grants = list(current_grants) updated_grants.append(required_grant) - updated_acl['Grants'] = updated_grants - del updated_acl['ResponseMetadata'] + updated_acl["Grants"] = updated_grants + del updated_acl["ResponseMetadata"] try: connection.put_bucket_acl(aws_retry=True, Bucket=target_bucket, AccessControlPolicy=updated_acl) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -124,7 +123,6 @@ def verify_acls(connection, module, target_bucket): def enable_bucket_logging(connection, module): - bucket_name = module.params.get("name") target_bucket = module.params.get("target_bucket") target_prefix = module.params.get("target_prefix") @@ -132,9 +130,12 @@ def enable_bucket_logging(connection, module): try: bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name) - except is_boto3_error_code('NoSuchBucket'): - module.fail_json(msg="Bucket '{0}' not found".format(bucket_name)) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("NoSuchBucket"): + module.fail_json(msg=f"Bucket '{bucket_name}' not found") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to fetch current logging status") try: @@ -151,11 +152,12 @@ def enable_bucket_logging(connection, module): aws_retry=True, Bucket=bucket_name, BucketLoggingStatus={ - 'LoggingEnabled': { - 'TargetBucket': target_bucket, - 'TargetPrefix': target_prefix, + "LoggingEnabled": { + "TargetBucket": target_bucket, + "TargetPrefix": target_prefix, } - }) + }, + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to enable bucket logging") @@ -165,7 +167,6 @@ def enable_bucket_logging(connection, module): def disable_bucket_logging(connection, module): - bucket_name = module.params.get("name") changed = False @@ -181,11 +182,9 @@ def disable_bucket_logging(connection, module): module.exit_json(changed=True) try: - response = AWSRetry.jittered_backoff( - catch_extra_error_codes=['InvalidTargetBucketForLogging'] - )(connection.put_bucket_logging)( - Bucket=bucket_name, BucketLoggingStatus={} - ) + response = AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidTargetBucketForLogging"])( + connection.put_bucket_logging + )(Bucket=bucket_name, BucketLoggingStatus={}) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: 
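# The put_bucket_logging call above is wrapped in a hand-built jittered backoff that
# additionally retries InvalidTargetBucketForLogging (presumably to ride out S3's
# eventually-consistent ACL/logging state); any error that survives the retries is
# fatal and is surfaced below with full AWS diagnostics.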
module.fail_json_aws(e, msg="Failed to disable bucket logging") @@ -193,24 +192,23 @@ def main(): - argument_spec = dict( name=dict(required=True), target_bucket=dict(required=False, default=None), target_prefix=dict(required=False, default=""), - state=dict(required=False, default='present', choices=['present', 'absent']), + state=dict(required=False, default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("s3", retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") - if state == 'present': + if state == "present": enable_bucket_logging(connection, module) - elif state == 'absent': + elif state == "absent": disable_bucket_logging(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py b/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py index dff566821..4e62b7bf8 100644 --- a/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py +++ b/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py @@ -1,23 +1,22 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: s3_metrics_configuration version_added: 1.3.0 short_description: Manage s3 bucket metrics configuration in AWS description: - - Manage s3 bucket metrics configuration in AWS which allows to get the CloudWatch request metrics for the objects in a bucket -author: Dmytro Vorotyntsev (@vorotech) + - Manage s3 bucket metrics configuration in AWS, which allows you to get the CloudWatch request metrics for the objects in a bucket +author: + - Dmytro Vorotyntsev (@vorotech) notes: - - This modules manages single metrics configuration, the s3 bucket might have up to 1,000 metrics configurations - - To request metrics for the entire bucket, create a metrics configuration without a filter - - Metrics configurations are necessary only to enable request metric, bucket-level daily storage metrics are always turned on + - This module manages a single metrics configuration; an s3 bucket might have up to 1,000 metrics configurations + - To request metrics for the entire bucket, create a metrics configuration without a filter + - Metrics configurations are necessary only to enable request metrics; bucket-level daily storage metrics are always turned on options: bucket_name: description: @@ -48,13 +47,14 @@ options: choices: ['present', 'absent'] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' +RETURN = r""" # """ -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details.
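# An additional minimal sketch (bucket, id, prefix and tag values are illustrative):
# combining filter_prefix with filter_tags is expressed as an S3 'And' filter, which
# _create_metrics_configuration() below assembles automatically.
- name: Create a metrics configuration filtered by both a prefix and a tag
  community.aws.s3_metrics_configuration:
    bucket_name: my-bucket
    id: AssetsByTeam
    filter_prefix: assets
    filter_tags:
      team: platform
    state: present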
- name: Create a metrics configuration that enables metrics for an entire bucket @@ -93,56 +93,47 @@ EXAMPLES = r''' bucket_name: my-bucket id: EntireBucket state: absent - -''' +""" try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def _create_metrics_configuration(mc_id, filter_prefix, filter_tags): - payload = { - 'Id': mc_id - } + payload = {"Id": mc_id} # Just a filter_prefix or just a single tag filter is a special case if filter_prefix and not filter_tags: - payload['Filter'] = { - 'Prefix': filter_prefix - } + payload["Filter"] = {"Prefix": filter_prefix} elif not filter_prefix and len(filter_tags) == 1: - payload['Filter'] = { - 'Tag': ansible_dict_to_boto3_tag_list(filter_tags)[0] - } + payload["Filter"] = {"Tag": ansible_dict_to_boto3_tag_list(filter_tags)[0]} # Otherwise we need to use 'And' elif filter_tags: - payload['Filter'] = { - 'And': { - 'Tags': ansible_dict_to_boto3_tag_list(filter_tags) - } - } + payload["Filter"] = {"And": {"Tags": ansible_dict_to_boto3_tag_list(filter_tags)}} if filter_prefix: - payload['Filter']['And']['Prefix'] = filter_prefix + payload["Filter"]["And"]["Prefix"] = filter_prefix return payload def create_or_update_metrics_configuration(client, module): - bucket_name = module.params.get('bucket_name') - mc_id = module.params.get('id') - filter_prefix = module.params.get('filter_prefix') - filter_tags = module.params.get('filter_tags') + bucket_name = module.params.get("bucket_name") + mc_id = module.params.get("id") + filter_prefix = module.params.get("filter_prefix") + filter_tags = module.params.get("filter_tags") try: response = client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - metrics_configuration = response['MetricsConfiguration'] - except is_boto3_error_code('NoSuchConfiguration'): + metrics_configuration = response["MetricsConfiguration"] + except is_boto3_error_code("NoSuchConfiguration"): metrics_configuration = None except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket metrics configuration") @@ -158,24 +149,21 @@ def create_or_update_metrics_configuration(client, module): try: client.put_bucket_metrics_configuration( - aws_retry=True, - Bucket=bucket_name, - Id=mc_id, - MetricsConfiguration=new_configuration + aws_retry=True, Bucket=bucket_name, Id=mc_id, MetricsConfiguration=new_configuration ) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to put bucket metrics configuration '%s'" % mc_id) + module.fail_json_aws(e, msg=f"Failed to put bucket metrics 
configuration '{mc_id}'") module.exit_json(changed=True) def delete_metrics_configuration(client, module): - bucket_name = module.params.get('bucket_name') - mc_id = module.params.get('id') + bucket_name = module.params.get("bucket_name") + mc_id = module.params.get("id") try: client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - except is_boto3_error_code('NoSuchConfiguration'): + except is_boto3_error_code("NoSuchConfiguration"): module.exit_json(changed=False) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket metrics configuration") @@ -185,39 +173,36 @@ def delete_metrics_configuration(client, module): try: client.delete_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - except is_boto3_error_code('NoSuchConfiguration'): + except is_boto3_error_code("NoSuchConfiguration"): module.exit_json(changed=False) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to delete bucket metrics configuration '%s'" % mc_id) + module.fail_json_aws(e, msg=f"Failed to delete bucket metrics configuration '{mc_id}'") module.exit_json(changed=True) def main(): argument_spec = dict( - bucket_name=dict(type='str', required=True), - id=dict(type='str', required=True), - filter_prefix=dict(type='str', required=False), - filter_tags=dict(default={}, type='dict', required=False, aliases=['filter_tag']), - state=dict(default='present', type='str', choices=['present', 'absent']), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True + bucket_name=dict(type="str", required=True), + id=dict(type="str", required=True), + filter_prefix=dict(type="str", required=False), + filter_tags=dict(default={}, type="dict", required=False, aliases=["filter_tag"]), + state=dict(default="present", type="str", choices=["present", "absent"]), ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - state = module.params.get('state') + state = module.params.get("state") try: - client = module.client('s3', retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3)) + client = module.client("s3", retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3)) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if state == 'present': + if state == "present": create_or_update_metrics_configuration(client, module) - elif state == 'absent': + elif state == "absent": delete_metrics_configuration(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/s3_sync.py b/ansible_collections/community/aws/plugins/modules/s3_sync.py index 80e3db0bd..36809ed2f 100644 --- a/ansible_collections/community/aws/plugins/modules/s3_sync.py +++ b/ansible_collections/community/aws/plugins/modules/s3_sync.py @@ -1,31 +1,17 @@ #!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" --- module: s3_sync version_added: 1.0.0 short_description: Efficiently upload multiple files to S3 description: - - The S3 module is great, but it is very slow for a large volume of files- even a dozen will be noticeable. In addition to speed, it handles globbing, - inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping. +- The S3 module is great, but it is very slow for a large volume of files- even a dozen will be noticeable. In addition to speed, it handles globbing, + inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping. options: mode: description: @@ -127,15 +113,15 @@ options: default: false type: bool -author: Ted Timmons (@tedder) +author: +- Ted Timmons (@tedder) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: basic upload community.aws.s3_sync: bucket: tedder @@ -166,9 +152,9 @@ EXAMPLES = ''' storage_class: "GLACIER" include: "*" exclude: "*.txt,.*" -''' +""" -RETURN = ''' +RETURN = r""" filelist_initial: description: file listing (dicts) from initial globbing returned: always @@ -241,7 +227,7 @@ uploads: "whytime": "1477931637 / 1477931489" }] -''' +""" import datetime import fnmatch @@ -251,6 +237,7 @@ import stat as osstat # os.stat constants try: from dateutil import tz + HAS_DATEUTIL = True except ImportError: HAS_DATEUTIL = False @@ -262,11 +249,10 @@ except ImportError: from ansible.module_utils._text import to_text -# import module snippets -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.community.aws.plugins.module_utils.etag import calculate_multipart_etag +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def gather_files(fileroot, include=None, exclude=None): @@ -275,25 +261,27 @@ def gather_files(fileroot, include=None, exclude=None): if os.path.isfile(fileroot): fullpath = fileroot fstat = os.stat(fullpath) - path_array = fileroot.split('/') + path_array = fileroot.split("/") chopped_path = path_array[-1] f_size = fstat[osstat.ST_SIZE] f_modified_epoch = fstat[osstat.ST_MTIME] - ret.append({ - 'fullpath': fullpath, - 'chopped_path': chopped_path, - 'modified_epoch': f_modified_epoch, - 'bytes': f_size, - }) + ret.append( + { + "fullpath": fullpath, + "chopped_path": chopped_path, + "modified_epoch": f_modified_epoch, + "bytes": f_size, + } + ) else: - for (dirpath, dirnames, filenames) in os.walk(fileroot): + for 
dirpath, dirnames, filenames in os.walk(fileroot): for fn in filenames: fullpath = os.path.join(dirpath, fn) # include/exclude if include: found = False - for x in include.split(','): + for x in include.split(","): if fnmatch.fnmatch(fn, x): found = True if not found: @@ -302,7 +290,7 @@ def gather_files(fileroot, include=None, exclude=None): if exclude: found = False - for x in exclude.split(','): + for x in exclude.split(","): if fnmatch.fnmatch(fn, x): found = True if found: @@ -313,36 +301,38 @@ def gather_files(fileroot, include=None, exclude=None): fstat = os.stat(fullpath) f_size = fstat[osstat.ST_SIZE] f_modified_epoch = fstat[osstat.ST_MTIME] - ret.append({ - 'fullpath': fullpath, - 'chopped_path': chopped_path, - 'modified_epoch': f_modified_epoch, - 'bytes': f_size, - }) + ret.append( + { + "fullpath": fullpath, + "chopped_path": chopped_path, + "modified_epoch": f_modified_epoch, + "bytes": f_size, + } + ) # dirpath = path *to* the directory # dirnames = subdirs *in* our directory # filenames return ret -def calculate_s3_path(filelist, key_prefix=''): +def calculate_s3_path(filelist, key_prefix=""): ret = [] for fileentry in filelist: # don't modify the input dict retentry = fileentry.copy() - retentry['s3_path'] = os.path.join(key_prefix, fileentry['chopped_path']) + retentry["s3_path"] = os.path.join(key_prefix, fileentry["chopped_path"]) ret.append(retentry) return ret -def calculate_local_etag(filelist, key_prefix=''): - '''Really, "calculate md5", but since AWS uses their own format, we'll just call - it a "local etag". TODO optimization: only calculate if remote key exists.''' +def calculate_local_etag(filelist, key_prefix=""): + """Really, "calculate md5", but since AWS uses their own format, we'll just call + it a "local etag". TODO optimization: only calculate if remote key exists.""" ret = [] for fileentry in filelist: # don't modify the input dict retentry = fileentry.copy() - retentry['local_etag'] = calculate_multipart_etag(fileentry['fullpath']) + retentry["local_etag"] = calculate_multipart_etag(fileentry["fullpath"]) ret.append(retentry) return ret @@ -351,20 +341,20 @@ def determine_mimetypes(filelist, override_map): ret = [] for fileentry in filelist: retentry = fileentry.copy() - localfile = fileentry['fullpath'] + localfile = fileentry["fullpath"] # reminder: file extension is '.txt', not 'txt'. file_extension = os.path.splitext(localfile)[1] if override_map and override_map.get(file_extension): # override? use it. - retentry['mime_type'] = override_map[file_extension] + retentry["mime_type"] = override_map[file_extension] else: # else sniff it - retentry['mime_type'], retentry['encoding'] = mimetypes.guess_type(localfile, strict=False) + retentry["mime_type"], retentry["encoding"] = mimetypes.guess_type(localfile, strict=False) # might be None or '' from one of the above. Not a great type but better than nothing. 
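# (mimetypes.guess_type() returns (None, None) for unrecognized extensions, so the
# fallback below substitutes a generic binary Content-Type rather than uploading the
# object with no type at all.)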
- if not retentry['mime_type']: - retentry['mime_type'] = 'application/octet-stream' + if not retentry["mime_type"]: + retentry["mime_type"] = "application/octet-stream" ret.append(retentry) @@ -376,10 +366,10 @@ def head_s3(s3, bucket, s3keys): for entry in s3keys: retentry = entry.copy() try: - retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path']) + retentry["s3_head"] = s3.head_object(Bucket=bucket, Key=entry["s3_path"]) # 404 (Missing) - File doesn't exist, we'll need to upload # 403 (Denied) - Sometimes we can write but not read, assume we'll need to upload - except is_boto3_error_code(['404', '403']): + except is_boto3_error_code(["404", "403"]): pass retkeys.append(retentry) return retkeys @@ -389,106 +379,127 @@ def filter_list(s3, bucket, s3filelist, strategy): keeplist = list(s3filelist) for e in keeplist: - e['_strategy'] = strategy + e["_strategy"] = strategy # init/fetch info from S3 if we're going to use it for comparisons - if not strategy == 'force': + if not strategy == "force": keeplist = head_s3(s3, bucket, s3filelist) # now actually run the strategies - if strategy == 'checksum': + if strategy == "checksum": for entry in keeplist: - if entry.get('s3_head'): + if entry.get("s3_head"): # since we have a remote s3 object, compare the values. - if entry['s3_head']['ETag'] == entry['local_etag']: + if entry["s3_head"]["ETag"] == entry["local_etag"]: # files match, so remove the entry - entry['skip_flag'] = True + entry["skip_flag"] = True else: # file etags don't match, keep the entry. pass else: # we don't have an etag, so we'll keep it. pass - elif strategy == 'date_size': + elif strategy == "date_size": for entry in keeplist: - if entry.get('s3_head'): + if entry.get("s3_head"): # fstat = entry['stat'] - local_modified_epoch = entry['modified_epoch'] - local_size = entry['bytes'] + local_modified_epoch = entry["modified_epoch"] + local_size = entry["bytes"] # py2's datetime doesn't have a timestamp() field, so we have to revert to something more awkward. # remote_modified_epoch = entry['s3_head']['LastModified'].timestamp() - remote_modified_datetime = entry['s3_head']['LastModified'] - delta = (remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc())) + remote_modified_datetime = entry["s3_head"]["LastModified"] + delta = remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()) remote_modified_epoch = delta.seconds + (delta.days * 86400) - remote_size = entry['s3_head']['ContentLength'] + remote_size = entry["s3_head"]["ContentLength"] - entry['whytime'] = '{0} / {1}'.format(local_modified_epoch, remote_modified_epoch) - entry['whysize'] = '{0} / {1}'.format(local_size, remote_size) + entry["whytime"] = f"{local_modified_epoch} / {remote_modified_epoch}" + entry["whysize"] = f"{local_size} / {remote_size}" if local_modified_epoch <= remote_modified_epoch and local_size == remote_size: - entry['skip_flag'] = True + entry["skip_flag"] = True else: - entry['why'] = "no s3_head" + entry["why"] = "no s3_head" # else: probably 'force'. Basically we don't skip with any with other strategies. else: pass # prune 'please skip' entries, if any. 
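# Only the 'checksum' and 'date_size' strategies set skip_flag above; under 'force'
# nothing is flagged, so the full list survives and every file is (re)uploaded.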
- return [x for x in keeplist if not x.get('skip_flag')] + return [x for x in keeplist if not x.get("skip_flag")] def upload_files(s3, bucket, filelist, params): ret = [] for entry in filelist: - args = { - 'ContentType': entry['mime_type'] - } - if params.get('permission'): - args['ACL'] = params['permission'] - if params.get('cache_control'): - args['CacheControl'] = params['cache_control'] - if params.get('storage_class'): - args['StorageClass'] = params['storage_class'] + args = {"ContentType": entry["mime_type"]} + if params.get("permission"): + args["ACL"] = params["permission"] + if params.get("cache_control"): + args["CacheControl"] = params["cache_control"] + if params.get("storage_class"): + args["StorageClass"] = params["storage_class"] # if this fails exception is caught in main() - s3.upload_file(entry['fullpath'], bucket, entry['s3_path'], ExtraArgs=args, Callback=None, Config=None) + s3.upload_file(entry["fullpath"], bucket, entry["s3_path"], ExtraArgs=args, Callback=None, Config=None) ret.append(entry) return ret def remove_files(s3, sourcelist, params): - bucket = params.get('bucket') - key_prefix = params.get('key_prefix') - paginator = s3.get_paginator('list_objects_v2') - current_keys = set(x['Key'] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get('Contents', [])) - keep_keys = set(to_text(source_file['s3_path']) for source_file in sourcelist) + bucket = params.get("bucket") + key_prefix = params.get("key_prefix") + paginator = s3.get_paginator("list_objects_v2") + current_keys = set( + x["Key"] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get("Contents", []) + ) + keep_keys = set(to_text(source_file["s3_path"]) for source_file in sourcelist) delete_keys = list(current_keys - keep_keys) # can delete 1000 objects at a time - groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)] + groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)] # fmt:skip for keys in groups_of_keys: - s3.delete_objects(Bucket=bucket, Delete={'Objects': [{'Key': key} for key in keys]}) + s3.delete_objects(Bucket=bucket, Delete={"Objects": [{"Key": key} for key in keys]}) return delete_keys def main(): argument_spec = dict( - mode=dict(choices=['push'], default='push'), - file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'), + mode=dict(choices=["push"], default="push"), + file_change_strategy=dict(choices=["force", "date_size", "checksum"], default="date_size"), bucket=dict(required=True), - key_prefix=dict(required=False, default='', no_log=False), - file_root=dict(required=True, type='path'), - permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', - 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']), - mime_map=dict(required=False, type='dict'), + key_prefix=dict(required=False, default="", no_log=False), + file_root=dict(required=True, type="path"), + permission=dict( + required=False, + choices=[ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control", + ], + ), + mime_map=dict(required=False, type="dict"), exclude=dict(required=False, default=".*"), include=dict(required=False, default="*"), - cache_control=dict(required=False, default=''), - delete=dict(required=False, type='bool', default=False), - storage_class=dict(required=False, default='STANDARD', - 
choices=['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA', 'ONEZONE_IA', - 'INTELLIGENT_TIERING', 'GLACIER', 'DEEP_ARCHIVE', 'OUTPOSTS']), + cache_control=dict(required=False, default=""), + delete=dict(required=False, type="bool", default=False), + storage_class=dict( + required=False, + default="STANDARD", + choices=[ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "GLACIER", + "DEEP_ARCHIVE", + "OUTPOSTS", + ], + ), # future options: encoding, metadata, retries ) @@ -497,36 +508,43 @@ def main(): ) if not HAS_DATEUTIL: - module.fail_json(msg='dateutil required for this module') + module.fail_json(msg="dateutil required for this module") result = {} - mode = module.params['mode'] + mode = module.params["mode"] try: - s3 = module.client('s3') + s3 = module.client("s3") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if mode == 'push': + if mode == "push": try: - result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include']) - result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map')) - result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix']) + result["filelist_initial"] = gather_files( + module.params["file_root"], exclude=module.params["exclude"], include=module.params["include"] + ) + result["filelist_typed"] = determine_mimetypes(result["filelist_initial"], module.params.get("mime_map")) + result["filelist_s3"] = calculate_s3_path(result["filelist_typed"], module.params["key_prefix"]) try: - result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3']) + result["filelist_local_etag"] = calculate_local_etag(result["filelist_s3"]) except ValueError as e: - if module.params['file_change_strategy'] == 'checksum': - module.fail_json_aws(e, 'Unable to calculate checksum. If running in FIPS mode, you may need to use another file_change_strategy') - result['filelist_local_etag'] = result['filelist_s3'].copy() - result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy']) - result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params) - - if module.params['delete']: - result['removed'] = remove_files(s3, result['filelist_local_etag'], module.params) + if module.params["file_change_strategy"] == "checksum": + module.fail_json_aws( + e, + "Unable to calculate checksum. If running in FIPS mode, you may need to use another file_change_strategy", + ) + result["filelist_local_etag"] = result["filelist_s3"].copy() + result["filelist_actionable"] = filter_list( + s3, module.params["bucket"], result["filelist_local_etag"], module.params["file_change_strategy"] + ) + result["uploads"] = upload_files(s3, module.params["bucket"], result["filelist_actionable"], module.params) + + if module.params["delete"]: + result["removed"] = remove_files(s3, result["filelist_local_etag"], module.params) # mark changed if we actually upload something. 
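# 'uploads' holds what was actually pushed and 'removed' is only populated when
# delete=true, so a run that deletes stale keys without uploading anything still
# reports changed=true.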
- if result.get('uploads') or result.get('removed'): - result['changed'] = True + if result.get("uploads") or result.get("removed"): + result["changed"] = True # result.update(filelist=actionable_filelist) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to push file") @@ -534,5 +552,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/s3_website.py b/ansible_collections/community/aws/plugins/modules/s3_website.py index 81d3169cd..1c212d117 100644 --- a/ansible_collections/community/aws/plugins/modules/s3_website.py +++ b/ansible_collections/community/aws/plugins/modules/s3_website.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: s3_website version_added: 1.0.0 short_description: Configure an s3 bucket as a website description: - - Configure an s3 bucket as a website -author: Rob White (@wimnat) + - Configure an s3 bucket as a website +author: + - Rob White (@wimnat) options: name: description: @@ -44,13 +43,12 @@ options: type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Configure an s3 bucket to redirect all requests to example.com @@ -70,10 +68,9 @@ EXAMPLES = ''' suffix: home.htm error_key: errors/404.htm state: present +""" -''' - -RETURN = ''' +RETURN = r""" index_document: description: index document type: complex @@ -157,7 +154,7 @@ routing_rules: returned: when routing rule present type: str sample: documents/ -''' +""" import time @@ -168,45 +165,43 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -def _create_redirect_dict(url): +def _create_redirect_dict(url): redirect_dict = {} - url_split = url.split(':') + url_split = url.split(":") # Did we split anything? 
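# For example, 'https://example.com' splits into ['https', '//example.com'] and
# becomes {'Protocol': 'https', 'HostName': 'example.com'}, while a bare
# 'example.com' yields just {'HostName': 'example.com'}; anything with more than
# one ':' is rejected below as invalid.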
if len(url_split) == 2: - redirect_dict[u'Protocol'] = url_split[0] - redirect_dict[u'HostName'] = url_split[1].replace('//', '') + redirect_dict["Protocol"] = url_split[0] + redirect_dict["HostName"] = url_split[1].replace("//", "") elif len(url_split) == 1: - redirect_dict[u'HostName'] = url_split[0] + redirect_dict["HostName"] = url_split[0] else: - raise ValueError('Redirect URL appears invalid') + raise ValueError("Redirect URL appears invalid") return redirect_dict def _create_website_configuration(suffix, error_key, redirect_all_requests): - website_configuration = {} if error_key is not None: - website_configuration['ErrorDocument'] = {'Key': error_key} + website_configuration["ErrorDocument"] = {"Key": error_key} if suffix is not None: - website_configuration['IndexDocument'] = {'Suffix': suffix} + website_configuration["IndexDocument"] = {"Suffix": suffix} if redirect_all_requests is not None: - website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests) + website_configuration["RedirectAllRequestsTo"] = _create_redirect_dict(redirect_all_requests) return website_configuration def enable_or_update_bucket_as_website(client_connection, resource_connection, module): - bucket_name = module.params.get("name") redirect_all_requests = module.params.get("redirect_all_requests") # If redirect_all_requests is set then don't use the default suffix that has been set @@ -224,14 +219,19 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m try: website_config = client_connection.get_bucket_website(Bucket=bucket_name) - except is_boto3_error_code('NoSuchWebsiteConfiguration'): + except is_boto3_error_code("NoSuchWebsiteConfiguration"): website_config = None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get website configuration") if website_config is None: try: - bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + bucket_website.put( + WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to set bucket website configuration") @@ -239,18 +239,26 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m module.fail_json(msg=str(e)) else: try: - if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \ - (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \ - (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)): - + if ( + (suffix is not None and website_config["IndexDocument"]["Suffix"] != suffix) + or (error_key is not None and website_config["ErrorDocument"]["Key"] != error_key) + or ( + redirect_all_requests is not None + and website_config["RedirectAllRequestsTo"] != _create_redirect_dict(redirect_all_requests) + ) + ): try: - bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + bucket_website.put( + WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) + ) changed = True except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update bucket website configuration") except KeyError as e: try: - bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + bucket_website.put( + WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update bucket website configuration") @@ -265,15 +273,17 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m def disable_bucket_as_website(client_connection, module): - changed = False bucket_name = module.params.get("name") try: client_connection.get_bucket_website(Bucket=bucket_name) - except is_boto3_error_code('NoSuchWebsiteConfiguration'): + except is_boto3_error_code("NoSuchWebsiteConfiguration"): module.exit_json(changed=changed) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket website") try: @@ -286,36 +296,35 @@ def disable_bucket_as_website(client_connection, module): def main(): - argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', required=True, choices=['present', 'absent']), - suffix=dict(type='str', required=False, default='index.html'), - error_key=dict(type='str', required=False, no_log=False), - redirect_all_requests=dict(type='str', required=False), + name=dict(type="str", required=True), + state=dict(type="str", required=True, choices=["present", "absent"]), + suffix=dict(type="str", required=False, default="index.html"), + error_key=dict(type="str", required=False, no_log=False), + redirect_all_requests=dict(type="str", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['redirect_all_requests', 'suffix'], - ['redirect_all_requests', 'error_key'] + ["redirect_all_requests", "suffix"], + ["redirect_all_requests", "error_key"], ], ) try: - client_connection = module.client('s3') - resource_connection = module.resource('s3') + client_connection = module.client("s3") + resource_connection = module.resource("s3") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") state = module.params.get("state") - if state == 'present': + if state == "present": enable_or_update_bucket_as_website(client_connection, resource_connection, module) - elif state == 'absent': + elif state == "absent": disable_bucket_as_website(client_connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py b/ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py index 851746189..fb2ff8ebe 100644 --- a/ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py +++ b/ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, REY Remi # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: secretsmanager_secret version_added: 1.0.0 @@ -107,16 +105,16 @@ options: - Specifies the number of days between automatic scheduled rotations of the secret. default: 30 type: int -extends_documentation_fragment: - - amazon.aws.ec2 - - amazon.aws.aws - - amazon.aws.boto3 - - amazon.aws.tags notes: - Support for I(purge_tags) was added in release 4.0.0. -''' +extends_documentation_fragment: + - amazon.aws.region.modules + - amazon.aws.common.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add string to AWS Secrets Manager community.aws.secretsmanager_secret: name: 'test_secret_string' @@ -146,9 +144,9 @@ EXAMPLES = r''' secret_type: 'string' secret: "{{ lookup('community.general.random_string', length=16, special=false) }}" overwrite: false -''' +""" -RETURN = r''' +RETURN = r""" secret: description: The secret information returned: always @@ -212,27 +210,44 @@ secret: returned: when the secret has tags example: {'MyTagName': 'Some Value'} version_added: 4.0.0 -''' +""" -from ansible.module_utils._text import to_bytes -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -from traceback import format_exc import json +from traceback import format_exc try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # handled by AnsibleAWSModule +from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class Secret(object): """An object representation of the Secret described by the self.module args""" + def __init__( - self, name, secret_type, secret, resource_policy=None, description="", kms_key_id=None, - tags=None, lambda_arn=None, rotation_interval=None, replica_regions=None, + self, + name, + secret_type, + secret, + resource_policy=None, + description="", + kms_key_id=None, + tags=None, + lambda_arn=None, + rotation_interval=None, + replica_regions=None, ): self.name = name self.description = description @@ -253,9 +268,7 @@ class Secret(object): @property def create_args(self): - args = { - "Name": self.name - } + args = {"Name": self.name} if self.description: args["Description"] = self.description if self.kms_key_id: @@ -264,10 +277,9 @@ class Secret(object): add_replica_regions = [] for replica in self.replica_regions: if 
replica["kms_key_id"]: - add_replica_regions.append({'Region': replica["region"], - 'KmsKeyId': replica["kms_key_id"]}) + add_replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]}) else: - add_replica_regions.append({'Region': replica["region"]}) + add_replica_regions.append({"Region": replica["region"]}) args["AddReplicaRegions"] = add_replica_regions if self.tags: args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags) @@ -276,9 +288,7 @@ class Secret(object): @property def update_args(self): - args = { - "SecretId": self.name - } + args = {"SecretId": self.name} if self.description: args["Description"] = self.description if self.kms_key_id: @@ -288,9 +298,7 @@ class Secret(object): @property def secret_resource_policy_args(self): - args = { - "SecretId": self.name - } + args = {"SecretId": self.name} if self.resource_policy: args["ResourcePolicy"] = self.resource_policy return args @@ -310,7 +318,7 @@ class SecretsManagerInterface(object): def __init__(self, module): self.module = module - self.client = self.module.client('secretsmanager') + self.client = self.module.client("secretsmanager") def get_secret(self, name): try: @@ -358,7 +366,7 @@ class SecretsManagerInterface(object): try: json.loads(secret.secret_resource_policy_args.get("ResourcePolicy")) except (TypeError, ValueError) as e: - self.module.fail_json(msg="Failed to parse resource policy as JSON: %s" % (str(e)), exception=format_exc()) + self.module.fail_json(msg=f"Failed to parse resource policy as JSON: {str(e)}", exception=format_exc()) try: response = self.client.put_resource_policy(**secret.secret_resource_policy_args) @@ -371,9 +379,7 @@ class SecretsManagerInterface(object): self.module.exit_json(changed=True) try: replica_regions = [] - response = self.client.remove_regions_from_replication( - SecretId=name, - RemoveReplicaRegions=regions) + response = self.client.remove_regions_from_replication(SecretId=name, RemoveReplicaRegions=regions) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to replicate secret") return response @@ -385,12 +391,10 @@ class SecretsManagerInterface(object): replica_regions = [] for replica in regions: if replica["kms_key_id"]: - replica_regions.append({'Region': replica["region"], 'KmsKeyId': replica["kms_key_id"]}) + replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]}) else: - replica_regions.append({'Region': replica["region"]}) - response = self.client.replicate_secret_to_regions( - SecretId=name, - AddReplicaRegions=replica_regions) + replica_regions.append({"Region": replica["region"]}) + response = self.client.replicate_secret_to_regions(SecretId=name, AddReplicaRegions=replica_regions) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to replicate secret") return response @@ -431,7 +435,8 @@ class SecretsManagerInterface(object): response = self.client.rotate_secret( SecretId=secret.name, RotationLambdaARN=secret.rotation_lambda_arn, - RotationRules=secret.rotation_rules) + RotationRules=secret.rotation_rules, + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to rotate secret secret") else: @@ -471,7 +476,7 @@ class SecretsManagerInterface(object): if desired_secret.kms_key_id != current_secret.get("KmsKeyId"): return False current_secret_value = self.client.get_secret_value(SecretId=current_secret.get("Name")) - if desired_secret.secret_type == 'SecretBinary': + if desired_secret.secret_type == 
"SecretBinary": desired_value = to_bytes(desired_secret.secret) else: desired_value = desired_secret.secret @@ -532,65 +537,69 @@ def compare_regions(desired_secret, current_secret): def main(): replica_args = dict( - region=dict(type='str', required=True), - kms_key_id=dict(type='str', required=False), + region=dict(type="str", required=True), + kms_key_id=dict(type="str", required=False), ) module = AnsibleAWSModule( argument_spec={ - 'name': dict(required=True), - 'state': dict(choices=['present', 'absent'], default='present'), - 'overwrite': dict(type='bool', default=True), - 'description': dict(default=""), - 'replica': dict(type='list', elements='dict', options=replica_args), - 'kms_key_id': dict(), - 'secret_type': dict(choices=['binary', 'string'], default="string"), - 'secret': dict(default="", no_log=True), - 'json_secret': dict(type='json', no_log=True), - 'resource_policy': dict(type='json', default=None), - 'tags': dict(type='dict', default=None, aliases=['resource_tags']), - 'purge_tags': dict(type='bool', default=True), - 'rotation_lambda': dict(), - 'rotation_interval': dict(type='int', default=30), - 'recovery_window': dict(type='int', default=30), + "name": dict(required=True), + "state": dict(choices=["present", "absent"], default="present"), + "overwrite": dict(type="bool", default=True), + "description": dict(default=""), + "replica": dict(type="list", elements="dict", options=replica_args), + "kms_key_id": dict(), + "secret_type": dict(choices=["binary", "string"], default="string"), + "secret": dict(default="", no_log=True), + "json_secret": dict(type="json", no_log=True), + "resource_policy": dict(type="json", default=None), + "tags": dict(type="dict", default=None, aliases=["resource_tags"]), + "purge_tags": dict(type="bool", default=True), + "rotation_lambda": dict(), + "rotation_interval": dict(type="int", default=30), + "recovery_window": dict(type="int", default=30), }, - mutually_exclusive=[['secret', 'json_secret']], + mutually_exclusive=[["secret", "json_secret"]], supports_check_mode=True, ) changed = False - state = module.params.get('state') + state = module.params.get("state") secrets_mgr = SecretsManagerInterface(module) - recovery_window = module.params.get('recovery_window') + recovery_window = module.params.get("recovery_window") secret = Secret( - module.params.get('name'), - module.params.get('secret_type'), - module.params.get('secret') or module.params.get('json_secret'), - description=module.params.get('description'), - replica_regions=module.params.get('replica'), - kms_key_id=module.params.get('kms_key_id'), - resource_policy=module.params.get('resource_policy'), - tags=module.params.get('tags'), - lambda_arn=module.params.get('rotation_lambda'), - rotation_interval=module.params.get('rotation_interval') + module.params.get("name"), + module.params.get("secret_type"), + module.params.get("secret") or module.params.get("json_secret"), + description=module.params.get("description"), + replica_regions=module.params.get("replica"), + kms_key_id=module.params.get("kms_key_id"), + resource_policy=module.params.get("resource_policy"), + tags=module.params.get("tags"), + lambda_arn=module.params.get("rotation_lambda"), + rotation_interval=module.params.get("rotation_interval"), ) - purge_tags = module.params.get('purge_tags') + purge_tags = module.params.get("purge_tags") current_secret = secrets_mgr.get_secret(secret.name) - if state == 'absent': + if state == "absent": if current_secret: if not current_secret.get("DeletedDate"): - result = 
camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)) + result = camel_dict_to_snake_dict( + secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window) + ) changed = True elif current_secret.get("DeletedDate") and recovery_window == 0: - result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)) + result = camel_dict_to_snake_dict( + secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window) + ) changed = True else: result = "secret already scheduled for deletion" else: result = "secret does not exist" - if state == 'present': + if state == "present": if current_secret is None: result = secrets_mgr.create_secret(secret) if secret.resource_policy and result.get("ARN"): @@ -602,7 +611,7 @@ def main(): secrets_mgr.restore_secret(secret.name) changed = True if not secrets_mgr.secrets_match(secret, current_secret): - overwrite = module.params.get('overwrite') + overwrite = module.params.get("overwrite") if overwrite: result = secrets_mgr.update_secret(secret) changed = True @@ -619,8 +628,8 @@ def main(): result = secrets_mgr.put_resource_policy(secret) changed = True - if module.params.get('tags') is not None: - current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', [])) + if module.params.get("tags") is not None: + current_tags = boto3_tag_list_to_ansible_dict(current_secret.get("Tags", [])) tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags, purge_tags) if tags_to_add: secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add)) @@ -638,12 +647,12 @@ def main(): changed = True result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name)) - if result.get('tags', None) is not None: - result['tags_dict'] = boto3_tag_list_to_ansible_dict(result.get('tags', [])) + if result.get("tags", None) is not None: + result["tags_dict"] = boto3_tag_list_to_ansible_dict(result.get("tags", [])) result.pop("response_metadata") module.exit_json(changed=changed, secret=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ses_identity.py b/ansible_collections/community/aws/plugins/modules/ses_identity.py index 997692df6..785519bd3 100644 --- a/ansible_collections/community/aws/plugins/modules/ses_identity.py +++ b/ansible_collections/community/aws/plugins/modules/ses_identity.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ses_identity version_added: 1.0.0 @@ -86,14 +84,14 @@ options: - Whether or not to enable feedback forwarding. - This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics. type: 'bool' - default: True + default: true extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
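Before moving on from the secretsmanager_secret changes above: the recovery_window parameter drives two different boto3 delete modes. A minimal sketch of that mapping, assuming the ForceDeleteWithoutRecovery/RecoveryWindowInDays split that the module's delete_secret helper performs (the helper itself is outside this hunk; client and name here are placeholders):

```python
import boto3

client = boto3.client("secretsmanager")

def delete_secret(name, recovery_window=30):
    # recovery_window == 0 is taken to mean "delete immediately, no recovery";
    # any other value schedules deletion, leaving the secret restorable via
    # restore_secret() for that many days (which is why the module restores a
    # secret pending deletion before updating it).
    if recovery_window == 0:
        return client.delete_secret(SecretId=name, ForceDeleteWithoutRecovery=True)
    return client.delete_secret(SecretId=name, RecoveryWindowInDays=recovery_window)
```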
- name: Ensure example@example.com email identity exists @@ -117,7 +115,7 @@ EXAMPLES = ''' community.aws.sns_topic: name: "complaints-topic" state: present - purge_subscriptions: False + purge_subscriptions: false register: topic_info - name: Deliver feedback to topic instead of owner email @@ -126,11 +124,11 @@ EXAMPLES = ''' state: present complaint_notifications: topic: "{{ topic_info.sns_arn }}" - include_headers: True + include_headers: true bounce_notifications: topic: "{{ topic_info.sns_arn }}" - include_headers: False - feedback_forwarding: False + include_headers: false + feedback_forwarding: false # Create an SNS topic for delivery notifications and leave complaints # Being forwarded to the identity owner email @@ -138,7 +136,7 @@ EXAMPLES = ''' community.aws.sns_topic: name: "delivery-notifications-topic" state: present - purge_subscriptions: False + purge_subscriptions: false register: topic_info - name: Delivery notifications to topic @@ -147,9 +145,9 @@ EXAMPLES = ''' state: present delivery_notifications: topic: "{{ topic_info.sns_arn }}" -''' +""" -RETURN = ''' +RETURN = r""" identity: description: The identity being modified. returned: success @@ -217,19 +215,22 @@ notification_attributes: headers_in_delivery_notifications_enabled: description: Whether or not headers are included in messages delivered to the delivery topic. type: bool -''' - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +""" import time try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def get_verification_attributes(connection, module, identity, retries=0, retryDelay=10): # Unpredictably get_identity_verification_attributes doesn't include the identity even when we've @@ -241,8 +242,8 @@ def get_verification_attributes(connection, module, identity, retries=0, retryDe try: response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity)) - identity_verification = response['VerificationAttributes'] + module.fail_json_aws(e, msg=f"Failed to retrieve identity verification attributes for {identity}") + identity_verification = response["VerificationAttributes"] if identity in identity_verification: break time.sleep(retryDelay) @@ -262,8 +263,8 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel try: response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity)) - notification_attributes = response['NotificationAttributes'] + module.fail_json_aws(e, msg=f"Failed to retrieve identity notification attributes for {identity}") + 
notification_attributes = response["NotificationAttributes"] # No clear AWS docs on when this happens, but it appears sometimes identities are not included in # in the notification attributes when the identity is first registered. Suspect that this is caused by @@ -279,7 +280,7 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel # something has gone very wrong. if len(notification_attributes) != 0: module.fail_json( - msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format( + msg="Unexpected identity found in notification attributes, expected {0} but got {1!r}.".format( identity, notification_attributes.keys(), ) @@ -291,46 +292,60 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel def desired_topic(module, notification_type): - arg_dict = module.params.get(notification_type.lower() + '_notifications') + arg_dict = module.params.get(notification_type.lower() + "_notifications") if arg_dict: - return arg_dict.get('topic', None) + return arg_dict.get("topic", None) else: return None def update_notification_topic(connection, module, identity, identity_notifications, notification_type): - topic_key = notification_type + 'Topic' + # Not passing the parameter should not cause any changes. + if module.params.get(f"{notification_type.lower()}_notifications") is None: + return False + + topic_key = notification_type + "Topic" if identity_notifications is None: # If there is no configuration for notifications cannot be being sent to topics # hence assume None as the current state. - current = None + current_topic = None elif topic_key in identity_notifications: - current = identity_notifications[topic_key] + current_topic = identity_notifications[topic_key] else: # If there is information on the notifications setup but no information on the # particular notification topic it's pretty safe to assume there's no topic for # this notification. AWS API docs suggest this information will always be # included but best to be defensive - current = None + current_topic = None - required = desired_topic(module, notification_type) + required_topic = desired_topic(module, notification_type) - if current != required: + if current_topic != required_topic: try: if not module.check_mode: - connection.set_identity_notification_topic(Identity=identity, NotificationType=notification_type, SnsTopic=required, aws_retry=True) + request_kwargs = { + "Identity": identity, + "NotificationType": notification_type, + "aws_retry": True, + } + + # The topic has to be omitted from the request to disable the notification. 
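As a standalone illustration of the boto3 call this hunk builds up (identity and topic_arn are placeholder values, and the module's fail_json_aws error handling is elided):

```python
import boto3

ses = boto3.client("ses")

def set_bounce_topic(identity, topic_arn=None):
    """Route Bounce feedback to an SNS topic, or clear the topic when None."""
    kwargs = {"Identity": identity, "NotificationType": "Bounce"}
    # Per the comment in the hunk above, SnsTopic must be left out of the
    # request entirely to disable the notification topic, hence the
    # conditionally built kwargs rather than passing SnsTopic=None.
    if topic_arn is not None:
        kwargs["SnsTopic"] = topic_arn
    ses.set_identity_notification_topic(**kwargs)
```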
+ if required_topic is not None: + request_kwargs["SnsTopic"] = required_topic + + connection.set_identity_notification_topic(**request_kwargs) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format( - identity=identity, - notification_type=notification_type, - )) + module.fail_json_aws( + e, + msg=f"Failed to set identity notification topic for {identity} {notification_type}", + ) return True return False def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type): - arg_dict = module.params.get(notification_type.lower() + '_notifications') - header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled' + arg_dict = module.params.get(notification_type.lower() + "_notifications") + header_key = "HeadersIn" + notification_type + "NotificationsEnabled" if identity_notifications is None: # If there is no configuration for topic notifications, headers cannot be being # forwarded, hence assume false. @@ -343,21 +358,21 @@ def update_notification_topic_headers(connection, module, identity, identity_not # headers are not included since most API consumers would interpret absence as false. current = False - if arg_dict is not None and 'include_headers' in arg_dict: - required = arg_dict['include_headers'] + if arg_dict is not None and "include_headers" in arg_dict: + required = arg_dict["include_headers"] else: required = False if current != required: try: if not module.check_mode: - connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required, - aws_retry=True) + connection.set_identity_headers_in_notifications_enabled( + Identity=identity, NotificationType=notification_type, Enabled=required, aws_retry=True + ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format( - identity=identity, - notification_type=notification_type, - )) + module.fail_json_aws( + e, msg=f"Failed to set identity headers in notification for {identity} {notification_type}" + ) return True return False @@ -368,51 +383,55 @@ def update_feedback_forwarding(connection, module, identity, identity_notificati # are being handled by SNS topics. So in the absence of identity_notifications # information existing feedback forwarding must be on. current = True - elif 'ForwardingEnabled' in identity_notifications: - current = identity_notifications['ForwardingEnabled'] + elif "ForwardingEnabled" in identity_notifications: + current = identity_notifications["ForwardingEnabled"] else: # If there is information on the notifications setup but no information on the # forwarding state it's pretty safe to assume forwarding is off. 
AWS API docs # suggest this information will always be included but best to be defensive current = False - required = module.params.get('feedback_forwarding') + required = module.params.get("feedback_forwarding") if current != required: try: if not module.check_mode: - connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True) + connection.set_identity_feedback_forwarding_enabled( + Identity=identity, ForwardingEnabled=required, aws_retry=True + ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity)) + module.fail_json_aws(e, msg=f"Failed to set identity feedback forwarding for {identity}") return True return False def create_mock_notifications_response(module): resp = { - "ForwardingEnabled": module.params.get('feedback_forwarding'), + "ForwardingEnabled": module.params.get("feedback_forwarding"), } - for notification_type in ('Bounce', 'Complaint', 'Delivery'): - arg_dict = module.params.get(notification_type.lower() + '_notifications') - if arg_dict is not None and 'topic' in arg_dict: - resp[notification_type + 'Topic'] = arg_dict['topic'] - - header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled' - if arg_dict is not None and 'include_headers' in arg_dict: - resp[header_key] = arg_dict['include_headers'] + for notification_type in ("Bounce", "Complaint", "Delivery"): + arg_dict = module.params.get(notification_type.lower() + "_notifications") + if arg_dict is not None and "topic" in arg_dict: + resp[notification_type + "Topic"] = arg_dict["topic"] + + header_key = "HeadersIn" + notification_type + "NotificationsEnabled" + if arg_dict is not None and "include_headers" in arg_dict: + resp[header_key] = arg_dict["include_headers"] else: resp[header_key] = False return resp def update_identity_notifications(connection, module): - identity = module.params.get('identity') + identity = module.params.get("identity") changed = False identity_notifications = get_identity_notifications(connection, module, identity) - for notification_type in ('Bounce', 'Complaint', 'Delivery'): + for notification_type in ("Bounce", "Complaint", "Delivery"): changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type) - changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type) + changed |= update_notification_topic_headers( + connection, module, identity, identity_notifications, notification_type + ) changed |= update_feedback_forwarding(connection, module, identity, identity_notifications) @@ -425,25 +444,29 @@ def update_identity_notifications(connection, module): def validate_params_for_identity_present(module): - if module.params.get('feedback_forwarding') is False: - if not (desired_topic(module, 'Bounce') and desired_topic(module, 'Complaint')): - module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires " - "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics") + if module.params.get("feedback_forwarding") is False: + if not (desired_topic(module, "Bounce") and desired_topic(module, "Complaint")): + module.fail_json( + msg=( + "Invalid Parameter Value 'False' for 'feedback_forwarding'. 
AWS requires " + "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics" + ) + ) def create_or_update_identity(connection, module, region, account_id): - identity = module.params.get('identity') + identity = module.params.get("identity") changed = False verification_attributes = get_verification_attributes(connection, module, identity) if verification_attributes is None: try: if not module.check_mode: - if '@' in identity: + if "@" in identity: connection.verify_email_identity(EmailAddress=identity, aws_retry=True) else: connection.verify_domain_identity(Domain=identity, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity)) + module.fail_json_aws(e, msg=f"Failed to verify identity {identity}") if module.check_mode: verification_attributes = { "VerificationStatus": "Pending", @@ -451,20 +474,22 @@ def create_or_update_identity(connection, module, region, account_id): else: verification_attributes = get_verification_attributes(connection, module, identity, retries=4) changed = True - elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'): - module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'], - verification_attributes=camel_dict_to_snake_dict(verification_attributes)) + elif verification_attributes["VerificationStatus"] not in ("Pending", "Success"): + module.fail_json( + msg="Identity " + identity + " in bad status " + verification_attributes["VerificationStatus"], + verification_attributes=camel_dict_to_snake_dict(verification_attributes), + ) if verification_attributes is None: - module.fail_json(msg='Unable to load identity verification attributes after registering identity.') + module.fail_json(msg="Unable to load identity verification attributes after registering identity.") notifications_changed, notification_attributes = update_identity_notifications(connection, module) changed |= notifications_changed if notification_attributes is None: - module.fail_json(msg='Unable to load identity notification attributes.') + module.fail_json(msg="Unable to load identity notification attributes.") - identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity + identity_arn = "arn:aws:ses:" + region + ":" + account_id + ":identity/" + identity module.exit_json( changed=changed, @@ -476,7 +501,7 @@ def create_or_update_identity(connection, module, region, account_id): def destroy_identity(connection, module): - identity = module.params.get('identity') + identity = module.params.get("identity") changed = False verification_attributes = get_verification_attributes(connection, module, identity) if verification_attributes is not None: @@ -484,7 +509,7 @@ def destroy_identity(connection, module): if not module.check_mode: connection.delete_identity(Identity=identity, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity)) + module.fail_json_aws(e, msg=f"Failed to delete identity {identity}") changed = True module.exit_json( @@ -494,44 +519,50 @@ def destroy_identity(connection, module): def get_account_id(module): - sts = module.client('sts') + sts = module.client("sts") try: caller_identity = sts.get_caller_identity() except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve caller identity') - return caller_identity['Account'] + 
module.fail_json_aws(e, msg="Failed to retrieve caller identity") + return caller_identity["Account"] def main(): module = AnsibleAWSModule( argument_spec={ - "identity": dict(required=True, type='str'), - "state": dict(default='present', choices=['present', 'absent']), - "bounce_notifications": dict(type='dict'), - "complaint_notifications": dict(type='dict'), - "delivery_notifications": dict(type='dict'), - "feedback_forwarding": dict(default=True, type='bool'), + "identity": dict(required=True, type="str"), + "state": dict(default="present", choices=["present", "absent"]), + "bounce_notifications": dict(type="dict"), + "complaint_notifications": dict(type="dict"), + "delivery_notifications": dict(type="dict"), + "feedback_forwarding": dict(default=True, type="bool"), }, supports_check_mode=True, ) - for notification_type in ('bounce', 'complaint', 'delivery'): - param_name = notification_type + '_notifications' + for notification_type in ("bounce", "complaint", "delivery"): + param_name = notification_type + "_notifications" arg_dict = module.params.get(param_name) if arg_dict: - extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')] + extra_keys = [x for x in arg_dict.keys() if x not in ("topic", "include_headers")] if extra_keys: - module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers') + module.fail_json( + msg="Unexpected keys " + + str(extra_keys) + + " in " + + param_name + + " valid keys are topic or include_headers" + ) # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but # the ansible build runs multiple instances of the test in parallel that's caused throttling # failures so apply a jittered backoff to call SES calls. - connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") - if state == 'present': + if state == "present": region = module.region account_id = get_account_id(module) validate_params_for_identity_present(module) @@ -540,5 +571,5 @@ def main(): destroy_identity(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ses_identity_policy.py b/ansible_collections/community/aws/plugins/modules/ses_identity_policy.py index 16d9f1ded..9b7a3d6b6 100644 --- a/ansible_collections/community/aws/plugins/modules/ses_identity_policy.py +++ b/ansible_collections/community/aws/plugins/modules/ses_identity_policy.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ses_identity_policy version_added: 1.0.0 @@ -41,12 +39,12 @@ options: choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
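One detail worth calling out from the ses_identity hunks just above: the returned identity ARN is constructed, not fetched. A short sketch of the same derivation (the function name is illustrative; the ARN shape and the STS lookup are taken from the diff):

```python
import boto3

def ses_identity_arn(identity, region):
    # SES identity ARNs follow a fixed shape, so the module derives the ARN
    # rather than querying SES for it; only the account id needs an API call.
    account_id = boto3.client("sts").get_caller_identity()["Account"]
    return f"arn:aws:ses:{region}:{account_id}:identity/{identity}"

# e.g. ses_identity_arn("example.com", "us-east-1")
#  -> "arn:aws:ses:us-east-1:123456789012:identity/example.com"
```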
- name: add sending authorization policy to domain identity @@ -75,42 +73,45 @@ EXAMPLES = ''' identity: example.com policy_name: ExamplePolicy state: absent -''' +""" -RETURN = ''' +RETURN = r""" policies: description: A list of all policies present on the identity after the operation. returned: success type: list sample: [ExamplePolicy] -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry +""" import json try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def get_identity_policy(connection, module, identity, policy_name): try: response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name)) - policies = response['Policies'] + module.fail_json_aws(e, msg=f"Failed to retrieve identity policy {policy_name}") + policies = response["Policies"] if policy_name in policies: return policies[policy_name] return None def create_or_update_identity_policy(connection, module): - identity = module.params.get('identity') - policy_name = module.params.get('policy_name') - required_policy = module.params.get('policy') + identity = module.params.get("identity") + policy_name = module.params.get("policy_name") + required_policy = module.params.get("policy") required_policy_dict = json.loads(required_policy) changed = False @@ -120,9 +121,11 @@ def create_or_update_identity_policy(connection, module): changed = True try: if not module.check_mode: - connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True) + connection.put_identity_policy( + Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True + ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name)) + module.fail_json_aws(e, msg=f"Failed to put identity policy {policy_name}") # Load the list of applied policies to include in the response. 
# In principle we should be able to just return the response, but given @@ -133,9 +136,9 @@ def create_or_update_identity_policy(connection, module): # # As a nice side benefit this also means the return is correct in check mode try: - policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames'] + policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)["PolicyNames"] except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to list identity policies') + module.fail_json_aws(e, msg="Failed to list identity policies") if policy_name is not None and policy_name not in policies_present: policies_present = list(policies_present) policies_present.append(policy_name) @@ -146,20 +149,20 @@ def create_or_update_identity_policy(connection, module): def delete_identity_policy(connection, module): - identity = module.params.get('identity') - policy_name = module.params.get('policy_name') + identity = module.params.get("identity") + policy_name = module.params.get("policy_name") changed = False try: - policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames'] + policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)["PolicyNames"] except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to list identity policies') + module.fail_json_aws(e, msg="Failed to list identity policies") if policy_name in policies_present: try: if not module.check_mode: connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name)) + module.fail_json_aws(e, msg=f"Failed to delete identity policy {policy_name}") changed = True policies_present = list(policies_present) policies_present.remove(policy_name) @@ -173,12 +176,12 @@ def delete_identity_policy(connection, module): def main(): module = AnsibleAWSModule( argument_spec={ - 'identity': dict(required=True, type='str'), - 'state': dict(default='present', choices=['present', 'absent']), - 'policy_name': dict(required=True, type='str'), - 'policy': dict(type='json', default=None), + "identity": dict(required=True, type="str"), + "state": dict(default="present", choices=["present", "absent"]), + "policy_name": dict(required=True, type="str"), + "policy": dict(type="json", default=None), }, - required_if=[['state', 'present', ['policy']]], + required_if=[["state", "present", ["policy"]]], supports_check_mode=True, ) @@ -186,15 +189,15 @@ def main(): # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but # the ansible build runs multiple instances of the test in parallel that's caused throttling # failures so apply a jittered backoff to call SES calls. 
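The throttling comment above recurs throughout these SES modules, and the mitigation is the AWSRetry.jittered_backoff() decorator applied on the next line. As a rough illustration of what "jittered backoff" means in practice (a simplified sketch, not the amazon.aws implementation; every name here is illustrative):

```python
import random
import time

class ThrottlingError(Exception):
    """Stand-in for the botocore throttling errors the real decorator matches."""

def jittered_backoff(retries=10, base_delay=3, max_delay=60):
    """Retry with full jitter: each failed attempt sleeps a random time in
    [0, min(max_delay, base_delay * 2**attempt)] so parallel callers spread
    out instead of retrying in lock-step."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            for attempt in range(retries - 1):
                try:
                    return func(*args, **kwargs)
                except ThrottlingError:
                    time.sleep(random.uniform(0, min(max_delay, base_delay * 2**attempt)))
            return func(*args, **kwargs)  # final attempt: let any error propagate
        return wrapper
    return decorator

@jittered_backoff(retries=5)
def list_identities():
    ...  # the SES call being protected would go here
```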
- connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") - if state == 'present': + if state == "present": create_or_update_identity_policy(connection, module) else: delete_identity_policy(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ses_rule_set.py b/ansible_collections/community/aws/plugins/modules/ses_rule_set.py index b42ac8088..cf478c0f9 100644 --- a/ansible_collections/community/aws/plugins/modules/ses_rule_set.py +++ b/ansible_collections/community/aws/plugins/modules/ses_rule_set.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017, Ben Tomasik <ben@tomasik.io> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ses_rule_set version_added: 1.0.0 @@ -46,15 +44,14 @@ options: required: False default: False extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. -EXAMPLES = """ -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. ---- - name: Create default rule set and activate it if not already community.aws.ses_rule_set: name: default-rule-set @@ -84,7 +81,7 @@ EXAMPLES = """ force: true """ -RETURN = """ +RETURN = r""" active: description: if the SES rule set is active returned: success if I(state) is C(present) @@ -100,25 +97,29 @@ rule_sets: }] """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry - try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def list_rule_sets(client, module): try: response = client.list_receipt_rule_sets(aws_retry=True) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Couldn't list rule sets.") - return response['RuleSets'] + return response["RuleSets"] def rule_set_in(name, rule_sets): - return any(s for s in rule_sets if s['Name'] == name) + return any(s for s in rule_sets if s["Name"] == name) def ruleset_active(client, module, name): @@ -126,8 +127,8 @@ def ruleset_active(client, module, name): active_rule_set = client.describe_active_receipt_rule_set(aws_retry=True) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Couldn't get the active rule set.") - if active_rule_set is not None and 'Metadata' in active_rule_set: - return name == active_rule_set['Metadata']['Name'] + if active_rule_set is not None and "Metadata" in active_rule_set: + return name == 
active_rule_set["Metadata"]["Name"] else: # Metadata was not set meaning there is no active rule set return False @@ -153,7 +154,7 @@ def update_active_rule_set(client, module, name, desired_active): try: client.set_active_receipt_rule_set(RuleSetName=name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't set active rule set to {0}.".format(name)) + module.fail_json_aws(e, msg=f"Couldn't set active rule set to {name}.") changed = True active = True elif not desired_active and active: @@ -165,7 +166,7 @@ def update_active_rule_set(client, module, name, desired_active): def create_or_update_rule_set(client, module): - name = module.params.get('name') + name = module.params.get("name") check_mode = module.check_mode changed = False @@ -175,14 +176,16 @@ def create_or_update_rule_set(client, module): try: client.create_receipt_rule_set(RuleSetName=name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name)) + module.fail_json_aws(e, msg=f"Couldn't create rule set {name}.") changed = True rule_sets = list(rule_sets) - rule_sets.append({ - 'Name': name, - }) + rule_sets.append( + { + "Name": name, + } + ) - (active_changed, active) = update_active_rule_set(client, module, name, module.params.get('active')) + (active_changed, active) = update_active_rule_set(client, module, name, module.params.get("active")) changed |= active_changed module.exit_json( @@ -193,30 +196,33 @@ def create_or_update_rule_set(client, module): def remove_rule_set(client, module): - name = module.params.get('name') + name = module.params.get("name") check_mode = module.check_mode changed = False rule_sets = list_rule_sets(client, module) if rule_set_in(name, rule_sets): active = ruleset_active(client, module, name) - if active and not module.params.get('force'): + if active and not module.params.get("force"): module.fail_json( - msg="Couldn't delete rule set {0} because it is currently active. Set force=true to delete an active ruleset.".format(name), + msg=( + f"Couldn't delete rule set {name} because it is currently active. Set force=true to delete an" + " active ruleset." 
+ ), error={ "code": "CannotDelete", - "message": "Cannot delete active rule set: {0}".format(name), - } + "message": f"Cannot delete active rule set: {name}", + }, ) if not check_mode: - if active and module.params.get('force'): + if active and module.params.get("force"): deactivate_rule_set(client, module) try: client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name)) + module.fail_json_aws(e, msg=f"Couldn't delete rule set {name}.") changed = True - rule_sets = [x for x in rule_sets if x['Name'] != name] + rule_sets = [x for x in rule_sets if x["Name"] != name] module.exit_json( changed=changed, @@ -226,27 +232,27 @@ def remove_rule_set(client, module): def main(): argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - active=dict(type='bool'), - force=dict(type='bool', default=False), + name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + active=dict(type="bool"), + force=dict(type="bool", default=False), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - state = module.params.get('state') + state = module.params.get("state") # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but # the ansible build runs multiple instances of the test in parallel that's caused throttling # failures so apply a jittered backoff to call SES calls. - client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) - if state == 'absent': + if state == "absent": remove_rule_set(client, module) else: create_or_update_rule_set(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/sns.py b/ansible_collections/community/aws/plugins/modules/sns.py index f72bbfa49..62c440c1f 100644 --- a/ansible_collections/community/aws/plugins/modules/sns.py +++ b/ansible_collections/community/aws/plugins/modules/sns.py @@ -4,11 +4,7 @@ # Copyright: (c) 2014, Michael J. Schultz <mjschultz@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: sns short_description: Send Amazon Simple Notification Service messages version_added: 1.0.0 @@ -96,12 +92,12 @@ options: version_added: 5.4.0 extends_documentation_fragment: -- amazon.aws.ec2 -- amazon.aws.aws -- amazon.aws.boto3 -''' + - amazon.aws.region.modules + - amazon.aws.common.modules + - amazon.aws.boto3 +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Send default notification message via SNS community.aws.sns: msg: '{{ inventory_hostname }} has completed the play.' 
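Under the hood (see the sns main() hunks further down), the module collects the default message plus any protocol-specific overrides into a single JSON payload before publishing. A minimal boto3 sketch of that publish call, with placeholder arguments:

```python
import json

import boto3

sns = boto3.client("sns")

def publish_with_overrides(topic_arn, default_msg, email_msg=None):
    # With MessageStructure="json", Message must be a JSON object whose
    # "default" value is the fallback for every protocol; protocol-named keys
    # such as "email" override it for that delivery channel only.
    payload = {"default": default_msg}
    if email_msg is not None:
        payload["email"] = email_msg
    return sns.publish(
        TopicArn=topic_arn,
        Message=json.dumps(payload),
        MessageStructure="json",
    )
```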
@@ -139,7 +135,7 @@ EXAMPLES = """ delegate_to: localhost """ -RETURN = """ +RETURN = r""" msg: description: Human-readable diagnostic information returned: always @@ -159,32 +155,33 @@ sequence_number: import json try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: - pass # Handled by AnsibleAWSModule + pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup def main(): protocols = [ - 'http', - 'https', - 'email', - 'email_json', - 'sms', - 'sqs', - 'application', - 'lambda', + "http", + "https", + "email", + "email_json", + "sms", + "sqs", + "application", + "lambda", ] argument_spec = dict( - msg=dict(required=True, aliases=['default']), + msg=dict(required=True, aliases=["default"]), subject=dict(), topic=dict(required=True), - message_attributes=dict(type='dict'), - message_structure=dict(choices=['json', 'string'], default='json'), + message_attributes=dict(type="dict"), + message_structure=dict(choices=["json", "string"], default="json"), message_group_id=dict(), message_deduplication_id=dict(), ) @@ -195,50 +192,48 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec) sns_kwargs = dict( - Message=module.params['msg'], - Subject=module.params['subject'], - MessageStructure=module.params['message_structure'], + Message=module.params["msg"], + Subject=module.params["subject"], + MessageStructure=module.params["message_structure"], ) - if module.params['message_attributes']: - if module.params['message_structure'] != 'string': + if module.params["message_attributes"]: + if module.params["message_structure"] != "string": module.fail_json(msg='message_attributes is only supported when the message_structure is "string".') - sns_kwargs['MessageAttributes'] = module.params['message_attributes'] + sns_kwargs["MessageAttributes"] = module.params["message_attributes"] if module.params["message_group_id"]: sns_kwargs["MessageGroupId"] = module.params["message_group_id"] if module.params["message_deduplication_id"]: sns_kwargs["MessageDeduplicationId"] = module.params["message_deduplication_id"] - dict_msg = { - 'default': sns_kwargs['Message'] - } + dict_msg = {"default": sns_kwargs["Message"]} for p in protocols: if module.params[p]: - if sns_kwargs['MessageStructure'] != 'json': + if sns_kwargs["MessageStructure"] != "json": module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".') - dict_msg[p.replace('_', '-')] = module.params[p] + dict_msg[p.replace("_", "-")] = module.params[p] - client = module.client('sns') + client = module.client("sns") - topic = module.params['topic'] - if ':' in topic: + topic = module.params["topic"] + if ":" in topic: # Short names can't contain ':' so we'll assume this is the full ARN - sns_kwargs['TopicArn'] = topic + sns_kwargs["TopicArn"] = topic else: - sns_kwargs['TopicArn'] = topic_arn_lookup(client, module, topic) + sns_kwargs["TopicArn"] = topic_arn_lookup(client, module, topic) - if not sns_kwargs['TopicArn']: - module.fail_json(msg='Could not find topic: {0}'.format(topic)) + if not sns_kwargs["TopicArn"]: + module.fail_json(msg=f"Could not find topic: {topic}") - if 
sns_kwargs['MessageStructure'] == 'json': - sns_kwargs['Message'] = json.dumps(dict_msg) + if sns_kwargs["MessageStructure"] == "json": + sns_kwargs["Message"] = json.dumps(dict_msg) try: result = client.publish(**sns_kwargs) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to publish message') + module.fail_json_aws(e, msg="Failed to publish message") sns_result = dict(msg="OK", message_id=result["MessageId"]) @@ -248,5 +243,5 @@ def main(): module.exit_json(**sns_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/sns_topic.py b/ansible_collections/community/aws/plugins/modules/sns_topic.py index 3c05be004..0fe7fbe33 100644 --- a/ansible_collections/community/aws/plugins/modules/sns_topic.py +++ b/ansible_collections/community/aws/plugins/modules/sns_topic.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: sns_topic short_description: Manages AWS SNS topics and subscriptions version_added: 1.0.0 @@ -159,11 +156,11 @@ options: notes: - Support for I(tags) and I(purge_tags) was added in release 5.3.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags.modules - amazon.aws.boto3 -''' +""" EXAMPLES = r""" @@ -182,7 +179,7 @@ EXAMPLES = r""" numMinDelayRetries: 2 numNoDelayRetries: 2 backoffFunction: "linear" - disableSubscriptionOverrides: True + disableSubscriptionOverrides: true defaultThrottlePolicy: maxReceivesPerSecond: 10 subscriptions: @@ -216,7 +213,7 @@ EXAMPLES = r""" state: absent """ -RETURN = r''' +RETURN = r""" sns_arn: description: The ARN of the topic you are modifying type: str @@ -332,7 +329,7 @@ sns_topic: returned: always type: bool sample: false -''' +""" import json @@ -341,38 +338,41 @@ try: except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.community.aws.plugins.module_utils.sns import list_topics -from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup -from ansible_collections.community.aws.plugins.module_utils.sns import compare_delivery_policies -from ansible_collections.community.aws.plugins.module_utils.sns import list_topic_subscriptions +from ansible_collections.amazon.aws.plugins.module_utils.arn import parse_aws_arn +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.sns import canonicalize_endpoint +from 
ansible_collections.community.aws.plugins.module_utils.sns import compare_delivery_policies from ansible_collections.community.aws.plugins.module_utils.sns import get_info +from ansible_collections.community.aws.plugins.module_utils.sns import list_topic_subscriptions +from ansible_collections.community.aws.plugins.module_utils.sns import list_topics +from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup from ansible_collections.community.aws.plugins.module_utils.sns import update_tags class SnsTopicManager(object): - """ Handles SNS Topic creation and destruction """ - - def __init__(self, - module, - name, - topic_type, - state, - display_name, - policy, - delivery_policy, - subscriptions, - purge_subscriptions, - tags, - purge_tags, - content_based_deduplication, - check_mode): - - self.connection = module.client('sns') + """Handles SNS Topic creation and destruction""" + + def __init__( + self, + module, + name, + topic_type, + state, + display_name, + policy, + delivery_policy, + subscriptions, + purge_subscriptions, + tags, + purge_tags, + content_based_deduplication, + check_mode, + ): + self.connection = module.client("sns") self.module = module self.name = name self.topic_type = topic_type @@ -402,73 +402,80 @@ class SnsTopicManager(object): # NOTE: Never set FifoTopic = False. Some regions (including GovCloud) # don't support the attribute being set, even to False. - if self.topic_type == 'fifo': - attributes['FifoTopic'] = 'true' - if not self.name.endswith('.fifo'): - self.name = self.name + '.fifo' + if self.topic_type == "fifo": + attributes["FifoTopic"] = "true" + if not self.name.endswith(".fifo"): + self.name = self.name + ".fifo" if self.tags: tags = ansible_dict_to_boto3_tag_list(self.tags) if not self.check_mode: try: - response = self.connection.create_topic(Name=self.name, - Attributes=attributes, - Tags=tags) + response = self.connection.create_topic(Name=self.name, Attributes=attributes, Tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name) - self.topic_arn = response['TopicArn'] + self.module.fail_json_aws(e, msg=f"Couldn't create topic {self.name}") + self.topic_arn = response["TopicArn"] return True def _set_topic_attrs(self): changed = False try: - topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes'] + topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)["Attributes"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn) + self.module.fail_json_aws(e, msg=f"Couldn't get topic attributes for topic {self.topic_arn}") - if self.display_name and self.display_name != topic_attributes['DisplayName']: + if self.display_name and self.display_name != topic_attributes["DisplayName"]: changed = True - self.attributes_set.append('display_name') + self.attributes_set.append("display_name") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DisplayName', - AttributeValue=self.display_name) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, AttributeName="DisplayName", AttributeValue=self.display_name + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set display name") - if self.policy and 
compare_policies(self.policy, json.loads(topic_attributes['Policy'])): + if self.policy and compare_policies(self.policy, json.loads(topic_attributes["Policy"])): changed = True - self.attributes_set.append('policy') + self.attributes_set.append("policy") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='Policy', - AttributeValue=json.dumps(self.policy)) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, AttributeName="Policy", AttributeValue=json.dumps(self.policy) + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set topic policy") # Set content-based deduplication attribute. Ignore if topic_type is not fifo. - if ("FifoTopic" in topic_attributes and topic_attributes["FifoTopic"] == "true") and \ - self.content_based_deduplication: - enabled = "true" if self.content_based_deduplication in 'enabled' else "false" - if enabled != topic_attributes['ContentBasedDeduplication']: + if ( + "FifoTopic" in topic_attributes and topic_attributes["FifoTopic"] == "true" + ) and self.content_based_deduplication: + enabled = "true" if self.content_based_deduplication in "enabled" else "false" + if enabled != topic_attributes["ContentBasedDeduplication"]: changed = True - self.attributes_set.append('content_based_deduplication') + self.attributes_set.append("content_based_deduplication") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='ContentBasedDeduplication', - AttributeValue=enabled) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, AttributeName="ContentBasedDeduplication", AttributeValue=enabled + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set content-based deduplication") - if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or - compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))): + if self.delivery_policy and ( + "DeliveryPolicy" not in topic_attributes + or compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes["DeliveryPolicy"])) + ): changed = True - self.attributes_set.append('delivery_policy') + self.attributes_set.append("delivery_policy") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DeliveryPolicy', - AttributeValue=json.dumps(self.delivery_policy)) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, + AttributeName="DeliveryPolicy", + AttributeValue=json.dumps(self.delivery_policy), + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy") return changed @@ -476,20 +483,23 @@ class SnsTopicManager(object): def _set_topic_subs(self): changed = False subscriptions_existing_list = set() - desired_subscriptions = [(sub['protocol'], - canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in - self.subscriptions] + desired_subscriptions = [ + (sub["protocol"], canonicalize_endpoint(sub["protocol"], sub["endpoint"])) for sub in self.subscriptions + ] for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): - sub_key = (sub['Protocol'], sub['Endpoint']) + sub_key = (sub["Protocol"], sub["Endpoint"]) subscriptions_existing_list.add(sub_key) - if (self.purge_subscriptions and sub_key not in 
desired_subscriptions and - sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')): + if ( + self.purge_subscriptions + and sub_key not in desired_subscriptions + and sub["SubscriptionArn"] not in ("PendingConfirmation", "Deleted") + ): changed = True self.subscriptions_deleted.append(sub_key) if not self.check_mode: try: - self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn']) + self.connection.unsubscribe(SubscriptionArn=sub["SubscriptionArn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic") @@ -500,13 +510,13 @@ class SnsTopicManager(object): try: self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn) + self.module.fail_json_aws(e, msg=f"Couldn't subscribe to topic {self.topic_arn}") return changed def _init_desired_subscription_attributes(self): for sub in self.subscriptions: - sub_key = (sub['protocol'], canonicalize_endpoint(sub['protocol'], sub['endpoint'])) - tmp_dict = sub.get('attributes', {}) + sub_key = (sub["protocol"], canonicalize_endpoint(sub["protocol"], sub["endpoint"])) + tmp_dict = sub.get("attributes", {}) # aws sdk expects values to be strings # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#SNS.Client.set_subscription_attributes for k, v in tmp_dict.items(): @@ -517,26 +527,28 @@ class SnsTopicManager(object): def _set_topic_subs_attributes(self): changed = False for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): - sub_key = (sub['Protocol'], sub['Endpoint']) - sub_arn = sub['SubscriptionArn'] + sub_key = (sub["Protocol"], sub["Endpoint"]) + sub_arn = sub["SubscriptionArn"] if not self.desired_subscription_attributes.get(sub_key): # subscription attributes aren't defined in desired, skipping continue try: - sub_current_attributes = self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)['Attributes'] + sub_current_attributes = self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)[ + "Attributes" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, "Couldn't get subscription attributes for subscription %s" % sub_arn) + self.module.fail_json_aws(e, f"Couldn't get subscription attributes for subscription {sub_arn}") - raw_message = self.desired_subscription_attributes[sub_key].get('RawMessageDelivery') - if raw_message is not None and 'RawMessageDelivery' in sub_current_attributes: - if sub_current_attributes['RawMessageDelivery'].lower() != raw_message.lower(): + raw_message = self.desired_subscription_attributes[sub_key].get("RawMessageDelivery") + if raw_message is not None and "RawMessageDelivery" in sub_current_attributes: + if sub_current_attributes["RawMessageDelivery"].lower() != raw_message.lower(): changed = True if not self.check_mode: try: - self.connection.set_subscription_attributes(SubscriptionArn=sub_arn, - AttributeName='RawMessageDelivery', - AttributeValue=raw_message) + self.connection.set_subscription_attributes( + SubscriptionArn=sub_arn, AttributeName="RawMessageDelivery", AttributeValue=raw_message + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, "Couldn't set RawMessageDelivery subscription 
attribute") @@ -549,11 +561,11 @@ class SnsTopicManager(object): if not subscriptions: return False for sub in subscriptions: - if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'): - self.subscriptions_deleted.append(sub['SubscriptionArn']) + if sub["SubscriptionArn"] not in ("PendingConfirmation", "Deleted"): + self.subscriptions_deleted.append(sub["SubscriptionArn"]) if not self.check_mode: try: - self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn']) + self.connection.unsubscribe(SubscriptionArn=sub["SubscriptionArn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic") return True @@ -564,11 +576,11 @@ class SnsTopicManager(object): try: self.connection.delete_topic(TopicArn=self.topic_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't delete topic %s" % self.topic_arn) + self.module.fail_json_aws(e, msg=f"Couldn't delete topic {self.topic_arn}") return True def _name_is_arn(self): - return self.name.startswith('arn:') + return bool(parse_aws_arn(self.name)) def ensure_ok(self): changed = False @@ -578,7 +590,9 @@ class SnsTopicManager(object): if self.topic_arn in list_topics(self.connection, self.module): changed |= self._set_topic_attrs() elif self.display_name or self.policy or self.delivery_policy: - self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account") + self.module.fail_json( + msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account" + ) changed |= self._set_topic_subs() self._init_desired_subscription_attributes() if self.topic_arn in list_topics(self.connection, self.module): @@ -595,7 +609,9 @@ class SnsTopicManager(object): self.populate_topic_arn() if self.topic_arn: if self.topic_arn not in list_topics(self.connection, self.module): - self.module.fail_json(msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe") + self.module.fail_json( + msg="Cannot use state=absent with third party ARN. 
Use subscribers=[] to unsubscribe" + ) changed = self._delete_subscriptions() changed |= self._delete_topic() return changed @@ -606,7 +622,7 @@ class SnsTopicManager(object): return name = self.name - if self.topic_type == 'fifo' and not name.endswith('.fifo'): + if self.topic_type == "fifo" and not name.endswith(".fifo"): name += ".fifo" self.topic_arn = topic_arn_lookup(self.connection, self.module, name) @@ -615,83 +631,87 @@ def main(): # We're kinda stuck with CamelCase here, it would be nice to switch to # snake_case, but we'd need to purge out the alias entries http_retry_args = dict( - minDelayTarget=dict(type='int', required=True), - maxDelayTarget=dict(type='int', required=True), - numRetries=dict(type='int', required=True), - numMaxDelayRetries=dict(type='int', required=True), - numMinDelayRetries=dict(type='int', required=True), - numNoDelayRetries=dict(type='int', required=True), - backoffFunction=dict(type='str', required=True, choices=['arithmetic', 'exponential', 'geometric', 'linear']), + minDelayTarget=dict(type="int", required=True), + maxDelayTarget=dict(type="int", required=True), + numRetries=dict(type="int", required=True), + numMaxDelayRetries=dict(type="int", required=True), + numMinDelayRetries=dict(type="int", required=True), + numNoDelayRetries=dict(type="int", required=True), + backoffFunction=dict(type="str", required=True, choices=["arithmetic", "exponential", "geometric", "linear"]), ) http_delivery_args = dict( - defaultHealthyRetryPolicy=dict(type='dict', required=True, options=http_retry_args), - disableSubscriptionOverrides=dict(type='bool', required=False), + defaultHealthyRetryPolicy=dict(type="dict", required=True, options=http_retry_args), + disableSubscriptionOverrides=dict(type="bool", required=False), defaultThrottlePolicy=dict( - type='dict', required=False, + type="dict", + required=False, options=dict( - maxReceivesPerSecond=dict(type='int', required=True), + maxReceivesPerSecond=dict(type="int", required=True), ), ), ) delivery_args = dict( - http=dict(type='dict', required=False, options=http_delivery_args), + http=dict(type="dict", required=False, options=http_delivery_args), ) argument_spec = dict( name=dict(required=True), - topic_type=dict(type='str', default='standard', choices=['standard', 'fifo']), - state=dict(default='present', choices=['present', 'absent']), + topic_type=dict(type="str", default="standard", choices=["standard", "fifo"]), + state=dict(default="present", choices=["present", "absent"]), display_name=dict(), - policy=dict(type='dict'), - delivery_policy=dict(type='dict', options=delivery_args), - subscriptions=dict(default=[], type='list', elements='dict'), - purge_subscriptions=dict(type='bool', default=True), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - content_based_deduplication=dict(choices=['enabled', 'disabled']) + policy=dict(type="dict"), + delivery_policy=dict(type="dict", options=delivery_args), + subscriptions=dict(default=[], type="list", elements="dict"), + purge_subscriptions=dict(type="bool", default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + content_based_deduplication=dict(choices=["enabled", "disabled"]), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) - - name = module.params.get('name') - topic_type = module.params.get('topic_type') - state = module.params.get('state') - display_name = module.params.get('display_name') - policy = 
module.params.get('policy') - delivery_policy = module.params.get('delivery_policy') - subscriptions = module.params.get('subscriptions') - purge_subscriptions = module.params.get('purge_subscriptions') - content_based_deduplication = module.params.get('content_based_deduplication') + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + name = module.params.get("name") + topic_type = module.params.get("topic_type") + state = module.params.get("state") + display_name = module.params.get("display_name") + policy = module.params.get("policy") + delivery_policy = module.params.get("delivery_policy") + subscriptions = module.params.get("subscriptions") + purge_subscriptions = module.params.get("purge_subscriptions") + content_based_deduplication = module.params.get("content_based_deduplication") check_mode = module.check_mode - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - - sns_topic = SnsTopicManager(module, - name, - topic_type, - state, - display_name, - policy, - delivery_policy, - subscriptions, - purge_subscriptions, - tags, - purge_tags, - content_based_deduplication, - check_mode) - - if state == 'present': + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + + sns_topic = SnsTopicManager( + module, + name, + topic_type, + state, + display_name, + policy, + delivery_policy, + subscriptions, + purge_subscriptions, + tags, + purge_tags, + content_based_deduplication, + check_mode, + ) + + if state == "present": changed = sns_topic.ensure_ok() - elif state == 'absent': + elif state == "absent": changed = sns_topic.ensure_gone() - sns_facts = dict(changed=changed, - sns_arn=sns_topic.topic_arn, - sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn)) + sns_facts = dict( + changed=changed, + sns_arn=sns_topic.topic_arn, + sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn), + ) module.exit_json(**sns_facts) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/sns_topic_info.py b/ansible_collections/community/aws/plugins/modules/sns_topic_info.py index ca6dd1aab..8cd712804 100644 --- a/ansible_collections/community/aws/plugins/modules/sns_topic_info.py +++ b/ansible_collections/community/aws/plugins/modules/sns_topic_info.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: sns_topic_info short_description: sns_topic_info module version_added: 3.2.0 @@ -21,12 +18,12 @@ options: required: false type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: list all the topics community.aws.sns_topic_info: register: sns_topic_list @@ -35,9 +32,9 @@ EXAMPLES = r''' community.aws.sns_topic_info: topic_arn: "{{ sns_arn }}" register: sns_topic_info -''' +""" -RETURN = r''' +RETURN = r""" result: description: - The result containing the details of one or all AWS SNS topics. @@ -132,7 +129,7 @@ result: description: The type of topic. 
type: str sample: "standard" -''' +""" try: @@ -140,26 +137,26 @@ try: except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.community.aws.plugins.module_utils.sns import list_topics +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.sns import get_info +from ansible_collections.community.aws.plugins.module_utils.sns import list_topics def main(): argument_spec = dict( - topic_arn=dict(type='str', required=False), + topic_arn=dict(type="str", required=False), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - topic_arn = module.params.get('topic_arn') + topic_arn = module.params.get("topic_arn") try: - connection = module.client('sns', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("sns", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS.') + module.fail_json_aws(e, msg="Failed to connect to AWS.") if topic_arn: results = dict(sns_arn=topic_arn, sns_topic=get_info(connection, module, topic_arn)) @@ -169,5 +166,5 @@ def main(): module.exit_json(result=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/sqs_queue.py b/ansible_collections/community/aws/plugins/modules/sqs_queue.py index 211e64b26..ad3ce68a7 100644 --- a/ansible_collections/community/aws/plugins/modules/sqs_queue.py +++ b/ansible_collections/community/aws/plugins/modules/sqs_queue.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: sqs_queue version_added: 1.0.0 @@ -104,13 +102,13 @@ options: - Enables content-based deduplication. Used for FIFOs only. - Defaults to C(false). extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -RETURN = r''' +RETURN = r""" content_based_deduplication: description: Enables content-based deduplication. Used for FIFOs only. 
type: bool @@ -186,9 +184,9 @@ tags: type: dict returned: always sample: '{"Env": "prod"}' -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create SQS queue with redrive policy community.aws.sqs_queue: name: my-queue @@ -258,7 +256,7 @@ EXAMPLES = r''' name: my-queue region: ap-southeast-2 state: absent -''' +""" import json @@ -270,26 +268,27 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_queue_name(module, is_fifo=False): - name = module.params.get('name') - if not is_fifo or name.endswith('.fifo'): + name = module.params.get("name") + if not is_fifo or name.endswith(".fifo"): return name - return name + '.fifo' + return name + ".fifo" # NonExistentQueue is explicitly expected when a queue doesn't exist @AWSRetry.jittered_backoff() def get_queue_url(client, name): try: - return client.get_queue_url(QueueName=name)['QueueUrl'] - except is_boto3_error_code('AWS.SimpleQueueService.NonExistentQueue'): + return client.get_queue_url(QueueName=name)["QueueUrl"] + except is_boto3_error_code("AWS.SimpleQueueService.NonExistentQueue"): return None @@ -297,13 +296,13 @@ def describe_queue(client, queue_url): """ Describe a queue in snake format """ - attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes'] + attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"], aws_retry=True)["Attributes"] description = dict(attributes) - description.pop('Policy', None) - description.pop('RedrivePolicy', None) + description.pop("Policy", None) + description.pop("RedrivePolicy", None) description = camel_dict_to_snake_dict(description) - description['policy'] = attributes.get('Policy', None) - description['redrive_policy'] = attributes.get('RedrivePolicy', None) + description["policy"] = attributes.get("Policy", None) + description["redrive_policy"] = attributes.get("RedrivePolicy", None) # Boto3 returns everything as a string, convert them back to integers/dicts if # that's what we expected. 
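The trailing comment above describes the type coercion that describe_queue() applies to boto3's string-typed attribute values; the hunk that follows shows it spread across several branches. As a consolidated illustration only, here is a minimal standalone sketch of that coercion step. The helper name coerce_attribute is hypothetical (not part of the module), and the boolean branch uses an explicit string comparison purely to keep the sketch self-contained:

import json

def coerce_attribute(key, value):
    # Sketch of the post-processing applied to SQS attributes, which boto3
    # returns exclusively as strings. Not part of community.aws itself.
    if value is None:
        return None
    if key in ("policy", "redrive_policy"):
        # IAM and redrive policies arrive as JSON-encoded strings.
        return json.loads(value)
    if key == "content_based_deduplication":
        # Boolean attributes arrive as the strings "true"/"false".
        return value.lower() == "true"
    if isinstance(value, str) and value.isdigit():
        # Timeouts, sizes and retention periods arrive as numeric strings.
        return int(value)
    return value

# Example: coerce_attribute("visibility_timeout", "30") -> 30
# Example: coerce_attribute("content_based_deduplication", "false") -> False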
@@ -311,12 +310,12 @@ def describe_queue(client, queue_url): if value is None: continue - if key in ['policy', 'redrive_policy']: + if key in ["policy", "redrive_policy"]: policy = json.loads(value) description[key] = policy continue - if key == 'content_based_deduplication': + if key == "content_based_deduplication": try: description[key] = bool(value) except (TypeError, ValueError): @@ -332,49 +331,48 @@ def describe_queue(client, queue_url): def create_or_update_sqs_queue(client, module): - is_fifo = (module.params.get('queue_type') == 'fifo') - kms_master_key_id = module.params.get('kms_master_key_id') + is_fifo = module.params.get("queue_type") == "fifo" + kms_master_key_id = module.params.get("kms_master_key_id") queue_name = get_queue_name(module, is_fifo) result = dict( name=queue_name, - region=module.params.get('region'), + region=module.params.get("region"), changed=False, ) queue_url = get_queue_url(client, queue_name) - result['queue_url'] = queue_url + result["queue_url"] = queue_url # Create a dict() to hold attributes that will be passed to boto3 create_attributes = {} if not queue_url: if is_fifo: - create_attributes['FifoQueue'] = "True" + create_attributes["FifoQueue"] = "True" if kms_master_key_id: - create_attributes['KmsMasterKeyId'] = kms_master_key_id - result['changed'] = True + create_attributes["KmsMasterKeyId"] = kms_master_key_id + result["changed"] = True if module.check_mode: return result - queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)['QueueUrl'] + queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)["QueueUrl"] changed, arn = update_sqs_queue(module, client, queue_url) - result['changed'] |= changed - result['queue_arn'] = arn + result["changed"] |= changed + result["queue_arn"] = arn changed, tags = update_tags(client, queue_url, module) - result['changed'] |= changed - result['tags'] = tags + result["changed"] |= changed + result["tags"] = tags result.update(describe_queue(client, queue_url)) COMPATABILITY_KEYS = dict( - delay_seconds='delivery_delay', - receive_message_wait_time_seconds='receive_message_wait_time', - visibility_timeout='default_visibility_timeout', - kms_data_key_reuse_period_seconds='kms_data_key_reuse_period', + delay_seconds="delivery_delay", + receive_message_wait_time_seconds="receive_message_wait_time", + visibility_timeout="default_visibility_timeout", + kms_data_key_reuse_period_seconds="kms_data_key_reuse_period", ) for key in list(result.keys()): - # The return values changed between boto and boto3, add the old keys too # for backwards compatibility return_name = COMPATABILITY_KEYS.get(key) @@ -387,30 +385,32 @@ def create_or_update_sqs_queue(client, module): def update_sqs_queue(module, client, queue_url): check_mode = module.check_mode changed = False - existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes'] + existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"], aws_retry=True)[ + "Attributes" + ] new_attributes = snake_dict_to_camel_dict(module.params, capitalize_first=True) attributes_to_set = dict() # Boto3 SQS deals with policies as strings, we want to deal with them as # dicts - if module.params.get('policy') is not None: - policy = module.params.get('policy') - current_value = existing_attributes.get('Policy', '{}') + if module.params.get("policy") is not None: + policy = module.params.get("policy") + 
current_value = existing_attributes.get("Policy", "{}") current_policy = json.loads(current_value) if compare_policies(current_policy, policy): - attributes_to_set['Policy'] = json.dumps(policy) + attributes_to_set["Policy"] = json.dumps(policy) changed = True - if module.params.get('redrive_policy') is not None: - policy = module.params.get('redrive_policy') - current_value = existing_attributes.get('RedrivePolicy', '{}') + if module.params.get("redrive_policy") is not None: + policy = module.params.get("redrive_policy") + current_value = existing_attributes.get("RedrivePolicy", "{}") current_policy = json.loads(current_value) if compare_policies(current_policy, policy): - attributes_to_set['RedrivePolicy'] = json.dumps(policy) + attributes_to_set["RedrivePolicy"] = json.dumps(policy) changed = True for attribute, value in existing_attributes.items(): # We handle these as a special case because they're IAM policies - if attribute in ['Policy', 'RedrivePolicy']: + if attribute in ["Policy", "RedrivePolicy"]: continue if attribute not in new_attributes.keys(): @@ -435,23 +435,19 @@ def update_sqs_queue(module, client, queue_url): if changed and not check_mode: client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True) - return changed, existing_attributes.get('queue_arn') + return changed, existing_attributes.get("queue_arn") def delete_sqs_queue(client, module): - is_fifo = (module.params.get('queue_type') == 'fifo') + is_fifo = module.params.get("queue_type") == "fifo" queue_name = get_queue_name(module, is_fifo) - result = dict( - name=queue_name, - region=module.params.get('region'), - changed=False - ) + result = dict(name=queue_name, region=module.params.get("region"), changed=False) queue_url = get_queue_url(client, queue_name) if not queue_url: return result - result['changed'] = bool(queue_url) + result["changed"] = bool(queue_url) if not module.check_mode: AWSRetry.jittered_backoff()(client.delete_queue)(QueueUrl=queue_url) @@ -459,13 +455,13 @@ def delete_sqs_queue(client, module): def update_tags(client, queue_url, module): - new_tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') + new_tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") if new_tags is None: return False, {} try: - existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)['Tags'] + existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)["Tags"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError) as e: existing_tags = {} @@ -476,7 +472,7 @@ def update_tags(client, queue_url, module): client.untag_queue(QueueUrl=queue_url, TagKeys=tags_to_remove, aws_retry=True) if tags_to_add: client.tag_queue(QueueUrl=queue_url, Tags=tags_to_add) - existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get('Tags', {}) + existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get("Tags", {}) else: existing_tags = new_tags @@ -485,41 +481,40 @@ def update_tags(client, queue_url, module): def main(): - argument_spec = dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - name=dict(type='str', required=True), - queue_type=dict(type='str', default='standard', choices=['standard', 'fifo']), - delay_seconds=dict(type='int', aliases=['delivery_delay']), - maximum_message_size=dict(type='int'), - message_retention_period=dict(type='int'), - policy=dict(type='dict'), - 
receive_message_wait_time_seconds=dict(type='int', aliases=['receive_message_wait_time']), - redrive_policy=dict(type='dict'), - visibility_timeout=dict(type='int', aliases=['default_visibility_timeout']), - kms_master_key_id=dict(type='str'), - fifo_throughput_limit=dict(type='str', choices=["perQueue", "perMessageGroupId"]), - deduplication_scope=dict(type='str', choices=['queue', 'messageGroup']), - kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period'], no_log=False), - content_based_deduplication=dict(type='bool'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + name=dict(type="str", required=True), + queue_type=dict(type="str", default="standard", choices=["standard", "fifo"]), + delay_seconds=dict(type="int", aliases=["delivery_delay"]), + maximum_message_size=dict(type="int"), + message_retention_period=dict(type="int"), + policy=dict(type="dict"), + receive_message_wait_time_seconds=dict(type="int", aliases=["receive_message_wait_time"]), + redrive_policy=dict(type="dict"), + visibility_timeout=dict(type="int", aliases=["default_visibility_timeout"]), + kms_master_key_id=dict(type="str"), + fifo_throughput_limit=dict(type="str", choices=["perQueue", "perMessageGroupId"]), + deduplication_scope=dict(type="str", choices=["queue", "messageGroup"]), + kms_data_key_reuse_period_seconds=dict(type="int", aliases=["kms_data_key_reuse_period"], no_log=False), + content_based_deduplication=dict(type="bool"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - state = module.params.get('state') - retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['AWS.SimpleQueueService.NonExistentQueue']) + state = module.params.get("state") + retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=["AWS.SimpleQueueService.NonExistentQueue"]) try: - client = module.client('sqs', retry_decorator=retry_decorator) - if state == 'present': + client = module.client("sqs", retry_decorator=retry_decorator) + if state == "present": result = create_or_update_sqs_queue(client, module) - elif state == 'absent': + elif state == "absent": result = delete_sqs_queue(client, module) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to control sqs queue') + module.fail_json_aws(e, msg="Failed to control sqs queue") else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/ssm_inventory_info.py b/ansible_collections/community/aws/plugins/modules/ssm_inventory_info.py new file mode 100644 index 000000000..c5b849097 --- /dev/null +++ b/ansible_collections/community/aws/plugins/modules/ssm_inventory_info.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = """ +module: ssm_inventory_info +version_added: 6.0.0 +short_description: Get SSM inventory information for EC2 instance + +description: + - Gather SSM inventory for EC2 instance configured with SSM. + +author: 'Aubin Bikouo (@abikouo)' + +options: + instance_id: + description: + - EC2 instance id. 
+ required: true + type: str + +extends_documentation_fragment: +- amazon.aws.common.modules +- amazon.aws.region.modules +- amazon.aws.boto3 +""" + +EXAMPLES = """ +- name: Retrieve SSM inventory info for instance id 'i-012345678902' + community.aws.ssm_inventory_info: + instance_id: 'i-012345678902' +""" + + +RETURN = """ +ssm_inventory: + returned: on success + description: > + SSM inventory information. + type: dict + sample: { + 'agent_type': 'amazon-ssm-agent', + 'agent_version': '3.2.582.0', + 'computer_name': 'ip-172-31-44-166.ec2.internal', + 'instance_id': 'i-039eb9b1f55934ab6', + 'instance_status': 'Active', + 'ip_address': '172.31.44.166', + 'platform_name': 'Fedora Linux', + 'platform_type': 'Linux', + 'platform_version': '37', + 'resource_type': 'EC2Instance' + } +""" + + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + +class SsmInventoryInfoFailure(Exception): + def __init__(self, exc, msg): + self.exc = exc + self.msg = msg + super().__init__(self) + + +def get_ssm_inventory(connection, filters): + try: + return connection.get_inventory(Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise SsmInventoryInfoFailure(exc=e, msg="get_ssm_inventory() failed.") + + +def execute_module(module, connection): + instance_id = module.params.get("instance_id") + try: + filters = [{"Key": "AWS:InstanceInformation.InstanceId", "Values": [instance_id]}] + + response = get_ssm_inventory(connection, filters) + entities = response.get("Entities", []) + ssm_inventory = {} + if entities: + content = entities[0].get("Data", {}).get("AWS:InstanceInformation", {}).get("Content", []) + if content: + ssm_inventory = camel_dict_to_snake_dict(content[0]) + module.exit_json(changed=False, ssm_inventory=ssm_inventory) + except SsmInventoryInfoFailure as e: + module.fail_json_aws(exception=e.exc, msg=e.msg) + + +def main(): + argument_spec = dict( + instance_id=dict(required=True, type="str"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + connection = module.client("ssm") + execute_module(module, connection) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/aws/plugins/modules/ssm_parameter.py b/ansible_collections/community/aws/plugins/modules/ssm_parameter.py index c435305c2..aefafca00 100644 --- a/ansible_collections/community/aws/plugins/modules/ssm_parameter.py +++ b/ansible_collections/community/aws/plugins/modules/ssm_parameter.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ssm_parameter version_added: 1.0.0 @@ -86,18 +84,17 @@ author: - "Bill Wang (@ozbillwang) <ozbillwang@gmail.com>" - "Michael De La Rue (@mikedlr)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - - amazon.aws.tags - notes: - Support for I(tags) and I(purge_tags) was added in release 5.3.0. 
-''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create or update key/value pair in AWS SSM parameter store community.aws.ssm_parameter: name: "Hello" @@ -165,9 +162,9 @@ EXAMPLES = ''' community.aws.ssm_parameter: name: "Hello" tags: {} -''' +""" -RETURN = ''' +RETURN = r""" parameter_metadata: type: dict description: @@ -242,30 +239,32 @@ parameter_metadata: returned: when the parameter has tags example: {'MyTagName': 'Some Value'} version_added: 5.3.0 -''' +""" import time try: import botocore - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class ParameterWaiterFactory(BaseWaiterFactory): def __init__(self, module): - client = module.client('ssm') + client = module.client("ssm") super(ParameterWaiterFactory, self).__init__(module, client) @property @@ -273,22 +272,24 @@ class ParameterWaiterFactory(BaseWaiterFactory): data = super(ParameterWaiterFactory, self)._waiter_model_data ssm_data = dict( parameter_exists=dict( - operation='DescribeParameters', - delay=1, maxAttempts=20, + operation="DescribeParameters", + delay=1, + maxAttempts=20, acceptors=[ - dict(state='retry', matcher='error', expected='ParameterNotFound'), - dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) == `0`'), - dict(state='success', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'), - ] + dict(state="retry", matcher="error", expected="ParameterNotFound"), + dict(state="retry", matcher="path", expected=True, argument="length(Parameters[].Name) == `0`"), + dict(state="success", matcher="path", expected=True, argument="length(Parameters[].Name) > `0`"), + ], ), parameter_deleted=dict( - operation='DescribeParameters', - delay=1, maxAttempts=20, + operation="DescribeParameters", + delay=1, + maxAttempts=20, acceptors=[ - dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'), - dict(state='success', matcher='path', expected=True, argument='length(Parameters[]) == `0`'), - dict(state='success', matcher='error', expected='ParameterNotFound'), - ] + dict(state="retry", matcher="path", expected=True, 
argument="length(Parameters[].Name) > `0`"), + dict(state="success", matcher="path", expected=True, argument="length(Parameters[]) == `0`"), + dict(state="success", matcher="error", expected="ParameterNotFound"), + ], ), ) data.update(ssm_data) @@ -299,10 +300,10 @@ def _wait_exists(client, module, name): if module.check_mode: return wf = ParameterWaiterFactory(module) - waiter = wf.get_waiter('parameter_exists') + waiter = wf.get_waiter("parameter_exists") try: waiter.wait( - ParameterFilters=[{'Key': 'Name', "Values": [name]}], + ParameterFilters=[{"Key": "Name", "Values": [name]}], ) except botocore.exceptions.WaiterError: module.warn("Timeout waiting for parameter to exist") @@ -317,7 +318,7 @@ for x in range(1, 10): try: parameter = describe_parameter(client, module, ParameterFilters=[{"Key": "Name", "Values": [name]}]) - if parameter.get('Version', 0) > version: + if parameter.get("Version", 0) > version: return except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe parameter while waiting for update") @@ -328,10 +329,10 @@ if module.check_mode: return wf = ParameterWaiterFactory(module) - waiter = wf.get_waiter('parameter_deleted') + waiter = wf.get_waiter("parameter_deleted") try: waiter.wait( - ParameterFilters=[{'Key': 'Name', "Values": [name]}], + ParameterFilters=[{"Key": "Name", "Values": [name]}], ) except botocore.exceptions.WaiterError: module.warn("Timeout waiting for parameter to be deleted") @@ -341,24 +342,27 @@ def tag_parameter(client, module, parameter_name, tags): try: - return client.add_tags_to_resource(aws_retry=True, ResourceType='Parameter', - ResourceId=parameter_name, Tags=tags) + return client.add_tags_to_resource( + aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name, Tags=tags + ) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Failed to add tag(s) to parameter") def untag_parameter(client, module, parameter_name, tag_keys): try: - return client.remove_tags_from_resource(aws_retry=True, ResourceType='Parameter', - ResourceId=parameter_name, TagKeys=tag_keys) + return client.remove_tags_from_resource( + aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name, TagKeys=tag_keys + ) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Failed to remove tag(s) from parameter") def get_parameter_tags(client, module, parameter_name): try: - tags = client.list_tags_for_resource(aws_retry=True, ResourceType='Parameter', - ResourceId=parameter_name)['TagList'] + tags = client.list_tags_for_resource(aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name)[ + "TagList" + ] tags_dict = boto3_tag_list_to_ansible_dict(tags) return tags_dict except (BotoCoreError, ClientError) as e: @@ -373,14 +377,12 @@ def update_parameter_tags(client, module, parameter_name, supplied_tags): return False, response current_tags = get_parameter_tags(client, module, parameter_name) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, supplied_tags, - module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, supplied_tags, module.params.get("purge_tags")) if tags_to_add: if module.check_mode: return True, response - response = tag_parameter(client, module, parameter_name, - ansible_dict_to_boto3_tag_list(tags_to_add)) + response = tag_parameter(client, module, 
parameter_name, ansible_dict_to_boto3_tag_list(tags_to_add)) changed = True if tags_to_remove: if module.check_mode: @@ -408,16 +410,16 @@ def update_parameter(client, module, **args): @AWSRetry.jittered_backoff() def describe_parameter(client, module, **args): - paginator = client.get_paginator('describe_parameters') + paginator = client.get_paginator("describe_parameters") existing_parameter = paginator.paginate(**args).build_full_result() - if not existing_parameter['Parameters']: + if not existing_parameter["Parameters"]: return None - tags_dict = get_parameter_tags(client, module, module.params.get('name')) - existing_parameter['Parameters'][0]['tags'] = tags_dict + tags_dict = get_parameter_tags(client, module, module.params.get("name")) + existing_parameter["Parameters"][0]["tags"] = tags_dict - return existing_parameter['Parameters'][0] + return existing_parameter["Parameters"][0] def create_update_parameter(client, module): @@ -425,82 +427,78 @@ def create_update_parameter(client, module): existing_parameter = None response = {} - args = dict( - Name=module.params.get('name'), - Type=module.params.get('string_type'), - Tier=module.params.get('tier') - ) + args = dict(Name=module.params.get("name"), Type=module.params.get("string_type"), Tier=module.params.get("tier")) - if (module.params.get('overwrite_value') in ("always", "changed")): + if module.params.get("overwrite_value") in ("always", "changed"): args.update(Overwrite=True) else: args.update(Overwrite=False) - if module.params.get('value') is not None: - args.update(Value=module.params.get('value')) + if module.params.get("value") is not None: + args.update(Value=module.params.get("value")) - if module.params.get('description'): - args.update(Description=module.params.get('description')) + if module.params.get("description"): + args.update(Description=module.params.get("description")) - if module.params.get('string_type') == 'SecureString': - args.update(KeyId=module.params.get('key_id')) + if module.params.get("string_type") == "SecureString": + args.update(KeyId=module.params.get("key_id")) try: - existing_parameter = client.get_parameter(aws_retry=True, Name=args['Name'], WithDecryption=True) + existing_parameter = client.get_parameter(aws_retry=True, Name=args["Name"], WithDecryption=True) except botocore.exceptions.ClientError: pass except botocore.exceptions.BotoCoreError as e: module.fail_json_aws(e, msg="fetching parameter") if existing_parameter: - original_version = existing_parameter['Parameter']['Version'] - if 'Value' not in args: - args['Value'] = existing_parameter['Parameter']['Value'] + original_version = existing_parameter["Parameter"]["Version"] + if "Value" not in args: + args["Value"] = existing_parameter["Parameter"]["Value"] - if (module.params.get('overwrite_value') == 'always'): + if module.params.get("overwrite_value") == "always": (changed, response) = update_parameter(client, module, **args) - elif (module.params.get('overwrite_value') == 'changed'): - if existing_parameter['Parameter']['Type'] != args['Type']: + elif module.params.get("overwrite_value") == "changed": + if existing_parameter["Parameter"]["Type"] != args["Type"]: (changed, response) = update_parameter(client, module, **args) - elif existing_parameter['Parameter']['Value'] != args['Value']: + elif existing_parameter["Parameter"]["Value"] != args["Value"]: (changed, response) = update_parameter(client, module, **args) - elif args.get('Description'): + elif args.get("Description"): # Description field not available from get_parameter 
function so get it from describe_parameters try: describe_existing_parameter = describe_parameter( - client, module, - ParameterFilters=[{"Key": "Name", "Values": [args['Name']]}]) + client, module, ParameterFilters=[{"Key": "Name", "Values": [args["Name"]]}] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="getting description value") - if describe_existing_parameter.get('Description') != args['Description']: + if describe_existing_parameter.get("Description") != args["Description"]: (changed, response) = update_parameter(client, module, **args) if changed: - _wait_updated(client, module, module.params.get('name'), original_version) + _wait_updated(client, module, module.params.get("name"), original_version) # Handle tag updates for existing parameters - if module.params.get('overwrite_value') != 'never': + if module.params.get("overwrite_value") != "never": tags_changed, tags_response = update_parameter_tags( - client, module, existing_parameter['Parameter']['Name'], - module.params.get('tags')) + client, module, existing_parameter["Parameter"]["Name"], module.params.get("tags") + ) changed = changed or tags_changed if tags_response: - response['tag_updates'] = tags_response + response["tag_updates"] = tags_response else: # Add tags in initial creation request - if module.params.get('tags'): - args.update(Tags=ansible_dict_to_boto3_tag_list(module.params.get('tags'))) + if module.params.get("tags"): + args.update(Tags=ansible_dict_to_boto3_tag_list(module.params.get("tags"))) # Overwrite=True conflicts with tags and is not needed for new param args.update(Overwrite=False) (changed, response) = update_parameter(client, module, **args) - _wait_exists(client, module, module.params.get('name')) + _wait_exists(client, module, module.params.get("name")) return changed, response @@ -509,8 +507,8 @@ def delete_parameter(client, module): response = {} try: - existing_parameter = client.get_parameter(aws_retry=True, Name=module.params.get('name'), WithDecryption=True) - except is_boto3_error_code('ParameterNotFound'): + existing_parameter = client.get_parameter(aws_retry=True, Name=module.params.get("name"), WithDecryption=True) + except is_boto3_error_code("ParameterNotFound"): return False, {} except botocore.exceptions.ClientError: # If we can't describe the parameter we may still be able to delete it @@ -524,23 +522,23 @@ def delete_parameter(client, module): return True, {} try: - response = client.delete_parameter( - aws_retry=True, - Name=module.params.get('name') - ) - except is_boto3_error_code('ParameterNotFound'): + response = client.delete_parameter(aws_retry=True, Name=module.params.get("name")) + except is_boto3_error_code("ParameterNotFound"): return False, {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="deleting parameter") - _wait_deleted(client, module, module.params.get('name')) + _wait_deleted(client, module, module.params.get("name")) return True, response def setup_client(module): retry_decorator = AWSRetry.jittered_backoff() - connection = module.client('ssm', retry_decorator=retry_decorator) + connection = module.client("ssm", retry_decorator=retry_decorator) return connection @@ -549,14 +547,14 @@ def setup_module_object(): name=dict(required=True), description=dict(), 
value=dict(required=False, no_log=True), - state=dict(default='present', choices=['present', 'absent']), - string_type=dict(default='String', choices=['String', 'StringList', 'SecureString'], aliases=['type']), - decryption=dict(default=True, type='bool'), + state=dict(default="present", choices=["present", "absent"]), + string_type=dict(default="String", choices=["String", "StringList", "SecureString"], aliases=["type"]), + decryption=dict(default=True, type="bool"), key_id=dict(default="alias/aws/ssm"), - overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']), - tier=dict(default='Standard', choices=['Standard', 'Advanced', 'Intelligent-Tiering']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + overwrite_value=dict(default="changed", choices=["never", "changed", "always"]), + tier=dict(default="Standard", choices=["Standard", "Advanced", "Intelligent-Tiering"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) return AnsibleAWSModule( @@ -567,7 +565,7 @@ def setup_module_object(): def main(): module = setup_module_object() - state = module.params.get('state') + state = module.params.get("state") client = setup_client(module) invocations = { @@ -580,18 +578,17 @@ def main(): try: parameter_metadata = describe_parameter( - client, module, - ParameterFilters=[{"Key": "Name", "Values": [module.params.get('name')]}]) - except is_boto3_error_code('ParameterNotFound'): + client, module, ParameterFilters=[{"Key": "Name", "Values": [module.params.get("name")]}] + ) + except is_boto3_error_code("ParameterNotFound"): return False, {} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="to describe parameter") if parameter_metadata: - result['parameter_metadata'] = camel_dict_to_snake_dict(parameter_metadata, - ignore_list=['tags']) + result["parameter_metadata"] = camel_dict_to_snake_dict(parameter_metadata, ignore_list=["tags"]) module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py index c141610bb..a2558c808 100644 --- a/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py +++ b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2019, Tom De Keyser (@tdekeyser) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: stepfunctions_state_machine version_added: 1.0.0 @@ -44,16 +41,17 @@ options: choices: [ present, absent ] type: str -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - - amazon.aws.tags author: - Tom De Keyser (@tdekeyser) -''' -EXAMPLES = ''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" + +EXAMPLES = r""" # Create a new AWS Step Functions state machine - name: Setup HelloWorld state machine community.aws.stepfunctions_state_machine: @@ -77,61 +75,62 @@ EXAMPLES = ''' community.aws.stepfunctions_state_machine: name: 
HelloWorldStateMachine state: absent -''' +""" -RETURN = ''' +RETURN = r""" state_machine_arn: description: ARN of the AWS Step Functions state machine type: str returned: always -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list, - AWSRetry, - compare_aws_tags, - boto3_tag_list_to_ansible_dict, - ) +""" try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def manage_state_machine(state, sfn_client, module): state_machine_arn = get_state_machine_arn(sfn_client, module) - if state == 'present': + if state == "present": if state_machine_arn is None: create(sfn_client, module) else: update(state_machine_arn, sfn_client, module) - elif state == 'absent': + elif state == "absent": if state_machine_arn is not None: remove(state_machine_arn, sfn_client, module) - check_mode(module, msg='State is up-to-date.') + check_mode(module, msg="State is up-to-date.") module.exit_json(changed=False, state_machine_arn=state_machine_arn) def create(sfn_client, module): - check_mode(module, msg='State machine would be created.', changed=True) + check_mode(module, msg="State machine would be created.", changed=True) - tags = module.params.get('tags') - sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') if tags else [] + tags = module.params.get("tags") + sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name="key", tag_value_key_name="value") if tags else [] state_machine = sfn_client.create_state_machine( - name=module.params.get('name'), - definition=module.params.get('definition'), - roleArn=module.params.get('role_arn'), - tags=sfn_tags + name=module.params.get("name"), + definition=module.params.get("definition"), + roleArn=module.params.get("role_arn"), + tags=sfn_tags, ) - module.exit_json(changed=True, state_machine_arn=state_machine.get('stateMachineArn')) + module.exit_json(changed=True, state_machine_arn=state_machine.get("stateMachineArn")) def remove(state_machine_arn, sfn_client, module): - check_mode(module, msg='State machine would be deleted: {0}'.format(state_machine_arn), changed=True) + check_mode(module, msg=f"State machine would be deleted: {state_machine_arn}", changed=True) sfn_client.delete_state_machine(stateMachineArn=state_machine_arn) module.exit_json(changed=True, state_machine_arn=state_machine_arn) @@ -141,29 +140,28 @@ def update(state_machine_arn, sfn_client, module): tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module) if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove: - check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True) + check_mode(module, msg=f"State machine would be updated: {state_machine_arn}", 
changed=True) sfn_client.update_state_machine( stateMachineArn=state_machine_arn, - definition=module.params.get('definition'), - roleArn=module.params.get('role_arn') - ) - sfn_client.untag_resource( - resourceArn=state_machine_arn, - tagKeys=tags_to_remove + definition=module.params.get("definition"), + roleArn=module.params.get("role_arn"), ) + sfn_client.untag_resource(resourceArn=state_machine_arn, tagKeys=tags_to_remove) sfn_client.tag_resource( resourceArn=state_machine_arn, - tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value') + tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name="key", tag_value_key_name="value"), ) module.exit_json(changed=True, state_machine_arn=state_machine_arn) def compare_tags(state_machine_arn, sfn_client, module): - new_tags = module.params.get('tags') - current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags') - return compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get('purge_tags')) + new_tags = module.params.get("tags") + current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get("tags") + return compare_aws_tags( + boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get("purge_tags") + ) def params_changed(state_machine_arn, sfn_client, module): @@ -172,7 +170,9 @@ def params_changed(state_machine_arn, sfn_client, module): from the existing state machine parameters. """ current = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn) - return current.get('definition') != module.params.get('definition') or current.get('roleArn') != module.params.get('role_arn') + return current.get("definition") != module.params.get("definition") or current.get("roleArn") != module.params.get( + "role_arn" + ) def get_state_machine_arn(sfn_client, module): @@ -180,42 +180,42 @@ def get_state_machine_arn(sfn_client, module): Finds the state machine ARN based on the name parameter. Returns None if there is no state machine with this name. 
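# ----------------------------------------------------------------------------
# [Editor's aside] get_state_machine_arn() above resolves a name to an ARN
# with a single list_state_machines() call. For reference, a minimal
# standalone sketch of the same lookup using boto3's paginator, which also
# covers accounts with more state machines than one page returns. Function
# and variable names here are illustrative assumptions, not part of this diff.
import boto3

def state_machine_arn_for_name(name):
    client = boto3.client("stepfunctions")  # region/credentials from the environment
    paginator = client.get_paginator("list_state_machines")
    for page in paginator.paginate():
        for machine in page["stateMachines"]:
            if machine["name"] == name:
                return machine["stateMachineArn"]
    return None  # mirrors the module: no match yields None
# ----------------------------------------------------------------------------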
""" - target_name = module.params.get('name') - all_state_machines = sfn_client.list_state_machines(aws_retry=True).get('stateMachines') + target_name = module.params.get("name") + all_state_machines = sfn_client.list_state_machines(aws_retry=True).get("stateMachines") for state_machine in all_state_machines: - if state_machine.get('name') == target_name: - return state_machine.get('stateMachineArn') + if state_machine.get("name") == target_name: + return state_machine.get("stateMachineArn") -def check_mode(module, msg='', changed=False): +def check_mode(module, msg="", changed=False): if module.check_mode: module.exit_json(changed=changed, output=msg) def main(): module_args = dict( - name=dict(type='str', required=True), - definition=dict(type='json'), - role_arn=dict(type='str'), - state=dict(choices=['present', 'absent'], default='present'), - tags=dict(default=None, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + name=dict(type="str", required=True), + definition=dict(type="json"), + role_arn=dict(type="str"), + state=dict(choices=["present", "absent"], default="present"), + tags=dict(default=None, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=module_args, - required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])], - supports_check_mode=True + required_if=[("state", "present", ["role_arn"]), ("state", "present", ["definition"])], + supports_check_mode=True, ) - sfn_client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5)) - state = module.params.get('state') + sfn_client = module.client("stepfunctions", retry_decorator=AWSRetry.jittered_backoff(retries=5)) + state = module.params.get("state") try: manage_state_machine(state, sfn_client, module) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to manage state machine') + module.fail_json_aws(e, msg="Failed to manage state machine") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py index aacfa987f..b7a9f7efb 100644 --- a/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py +++ b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2019, Prasad Katti (@prasadkatti) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: stepfunctions_state_machine_execution version_added: 1.0.0 @@ -47,16 +44,16 @@ options: type: str default: '' -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - author: - Prasad Katti (@prasadkatti) -''' -EXAMPLES = ''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" - name: Start an execution of a state machine community.aws.stepfunctions_state_machine_execution: name: an_execution_name @@ -69,9 +66,9 @@ EXAMPLES = ''' execution_arn: 
"arn:aws:states:us-west-2:123456789012:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8" cause: "cause of task failure" error: "error code of the failure" -''' +""" -RETURN = ''' +RETURN = r""" execution_arn: description: ARN of the AWS Step Functions state machine execution. type: str @@ -87,7 +84,7 @@ stop_date: type: str returned: if action == stop sample: "2019-11-02T22:39:49.071000-07:00" -''' +""" try: @@ -97,100 +94,96 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def start_execution(module, sfn_client): - ''' + """ start_execution uses execution name to determine if a previous execution already exists. If an execution by the provided name exists, call client.start_execution will not be called. - ''' + """ - state_machine_arn = module.params.get('state_machine_arn') - name = module.params.get('name') - execution_input = module.params.get('execution_input') + state_machine_arn = module.params.get("state_machine_arn") + name = module.params.get("name") + execution_input = module.params.get("execution_input") try: # list_executions is eventually consistent - page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn) + page_iterators = sfn_client.get_paginator("list_executions").paginate(stateMachineArn=state_machine_arn) - for execution in page_iterators.build_full_result()['executions']: - if name == execution['name']: - check_mode(module, msg='State machine execution already exists.', changed=False) + for execution in page_iterators.build_full_result()["executions"]: + if name == execution["name"]: + check_mode(module, msg="State machine execution already exists.", changed=False) module.exit_json(changed=False) - check_mode(module, msg='State machine execution would be started.', changed=True) - res_execution = sfn_client.start_execution( - stateMachineArn=state_machine_arn, - name=name, - input=execution_input - ) - except is_boto3_error_code('ExecutionAlreadyExists'): + check_mode(module, msg="State machine execution would be started.", changed=True) + res_execution = sfn_client.start_execution(stateMachineArn=state_machine_arn, name=name, input=execution_input) + except is_boto3_error_code("ExecutionAlreadyExists"): # this will never be executed anymore module.exit_json(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to start execution.") module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution)) def stop_execution(module, sfn_client): - - cause = module.params.get('cause') - error = module.params.get('error') - execution_arn = module.params.get('execution_arn') + cause = module.params.get("cause") + error = module.params.get("error") + execution_arn = module.params.get("execution_arn") try: # describe_execution is eventually consistent - execution_status = 
sfn_client.describe_execution(executionArn=execution_arn)['status'] - if execution_status != 'RUNNING': - check_mode(module, msg='State machine execution is not running.', changed=False) + execution_status = sfn_client.describe_execution(executionArn=execution_arn)["status"] + if execution_status != "RUNNING": + check_mode(module, msg="State machine execution is not running.", changed=False) module.exit_json(changed=False) - check_mode(module, msg='State machine execution would be stopped.', changed=True) - res = sfn_client.stop_execution( - executionArn=execution_arn, - cause=cause, - error=error - ) + check_mode(module, msg="State machine execution would be stopped.", changed=True) + res = sfn_client.stop_execution(executionArn=execution_arn, cause=cause, error=error) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to stop execution.") module.exit_json(changed=True, **camel_dict_to_snake_dict(res)) -def check_mode(module, msg='', changed=False): +def check_mode(module, msg="", changed=False): if module.check_mode: module.exit_json(changed=changed, output=msg) def main(): module_args = dict( - action=dict(choices=['start', 'stop'], default='start'), - name=dict(type='str'), - execution_input=dict(type='json', default={}), - state_machine_arn=dict(type='str'), - cause=dict(type='str', default=''), - error=dict(type='str', default=''), - execution_arn=dict(type='str') + action=dict(choices=["start", "stop"], default="start"), + name=dict(type="str"), + execution_input=dict(type="json", default={}), + state_machine_arn=dict(type="str"), + cause=dict(type="str", default=""), + error=dict(type="str", default=""), + execution_arn=dict(type="str"), ) module = AnsibleAWSModule( argument_spec=module_args, - required_if=[('action', 'start', ['name', 'state_machine_arn']), - ('action', 'stop', ['execution_arn']), - ], - supports_check_mode=True + required_if=[ + ("action", "start", ["name", "state_machine_arn"]), + ("action", "stop", ["execution_arn"]), + ], + supports_check_mode=True, ) - sfn_client = module.client('stepfunctions') + sfn_client = module.client("stepfunctions") - action = module.params.get('action') + action = module.params.get("action") if action == "start": start_execution(module, sfn_client) else: stop_execution(module, sfn_client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/storagegateway_info.py b/ansible_collections/community/aws/plugins/modules/storagegateway_info.py index 3f3c3ae2f..55b7c4685 100644 --- a/ansible_collections/community/aws/plugins/modules/storagegateway_info.py +++ b/ansible_collections/community/aws/plugins/modules/storagegateway_info.py @@ -1,14 +1,12 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Loic BLOT (@nerzhul) <loic.blot@unix-experience.fr> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # This module is sponsored by E.T.A.I. 
(www.etai.fr) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: storagegateway_info version_added: 1.0.0 @@ -45,12 +43,12 @@ options: required: false default: true extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -RETURN = ''' +RETURN = r""" gateways: description: list of gateway objects returned: always @@ -161,47 +159,49 @@ gateways: returned: always type: str sample: "present" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: "Get AWS storage gateway information" - community.aws.aws_sgw_info: + community.aws.storagegateway_info: - name: "Get AWS storage gateway information for region eu-west-3" - community.aws.aws_sgw_info: + community.aws.storagegateway_info: region: eu-west-3 -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class SGWInformationManager(object): def __init__(self, client, module): self.client = client self.module = module - self.name = self.module.params.get('name') + self.name = self.module.params.get("name") def fetch(self): gateways = self.list_gateways() for gateway in gateways: - if self.module.params.get('gather_local_disks'): + if self.module.params.get("gather_local_disks"): self.list_local_disks(gateway) # File share gateway - if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'): + if gateway["gateway_type"] == "FILE_S3" and self.module.params.get("gather_file_shares"): self.list_gateway_file_shares(gateway) # Volume tape gateway - elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'): + elif gateway["gateway_type"] == "VTL" and self.module.params.get("gather_tapes"): self.list_gateway_vtl(gateway) # iSCSI gateway - elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'): + elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get("gather_volumes"): self.list_gateway_volumes(gateway) self.module.exit_json(gateways=gateways) @@ -209,12 +209,13 @@ class SGWInformationManager(object): """ List all storage gateways for the AWS endpoint. """ + def list_gateways(self): try: - paginator = self.client.get_paginator('list_gateways') + paginator = self.client.get_paginator("list_gateways") response = paginator.paginate( PaginationConfig={ - 'PageSize': 100, + "PageSize": 100, } ).build_full_result() @@ -231,6 +232,7 @@ class SGWInformationManager(object): Read file share objects from AWS API response. Drop the gateway_arn attribute from response, as it will be duplicate with parent object. 
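# ----------------------------------------------------------------------------
# [Editor's aside] The surrounding SGWInformationManager methods page through
# the Storage Gateway APIs by hand, feeding each response's NextMarker back in
# as the Marker of the next request. A minimal standalone sketch of that loop
# for list_file_shares, assuming a plain boto3 client; names are illustrative.
import boto3

def list_all_file_shares(gateway_arn):
    client = boto3.client("storagegateway")
    shares = []
    marker = None
    while True:
        kwargs = {"GatewayARN": gateway_arn, "Limit": 100}
        if marker:
            kwargs["Marker"] = marker
        response = client.list_file_shares(**kwargs)
        shares.extend(response.get("FileShareInfoList", []))
        marker = response.get("NextMarker")
        if not marker:  # no NextMarker means the last page was reached
            return shares
# ----------------------------------------------------------------------------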
""" + @staticmethod def _read_gateway_fileshare_response(fileshares, aws_reponse): for share in aws_reponse["FileShareInfoList"]: @@ -244,22 +246,16 @@ class SGWInformationManager(object): """ List file shares attached to AWS storage gateway when in S3 mode. """ + def list_gateway_file_shares(self, gateway): try: - response = self.client.list_file_shares( - GatewayARN=gateway["gateway_arn"], - Limit=100 - ) + response = self.client.list_file_shares(GatewayARN=gateway["gateway_arn"], Limit=100) gateway["file_shares"] = [] marker = self._read_gateway_fileshare_response(gateway["file_shares"], response) while marker is not None: - response = self.client.list_file_shares( - GatewayARN=gateway["gateway_arn"], - Marker=marker, - Limit=100 - ) + response = self.client.list_file_shares(GatewayARN=gateway["gateway_arn"], Marker=marker, Limit=100) marker = self._read_gateway_fileshare_response(gateway["file_shares"], response) except (BotoCoreError, ClientError) as e: @@ -268,10 +264,13 @@ class SGWInformationManager(object): """ List storage gateway local disks """ + def list_local_disks(self, gateway): try: - gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in - self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']] + gateway["local_disks"] = [ + camel_dict_to_snake_dict(disk) + for disk in self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])["Disks"] + ] except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks") @@ -279,6 +278,7 @@ class SGWInformationManager(object): Read tape objects from AWS API response. Drop the gateway_arn attribute from response, as it will be duplicate with parent object. """ + @staticmethod def _read_gateway_tape_response(tapes, aws_response): for tape in aws_response["TapeInfos"]: @@ -292,20 +292,16 @@ class SGWInformationManager(object): """ List VTL & VTS attached to AWS storage gateway in VTL mode """ + def list_gateway_vtl(self, gateway): try: - response = self.client.list_tapes( - Limit=100 - ) + response = self.client.list_tapes(Limit=100) gateway["tapes"] = [] marker = self._read_gateway_tape_response(gateway["tapes"], response) while marker is not None: - response = self.client.list_tapes( - Marker=marker, - Limit=100 - ) + response = self.client.list_tapes(Marker=marker, Limit=100) marker = self._read_gateway_tape_response(gateway["tapes"], response) except (BotoCoreError, ClientError) as e: @@ -314,14 +310,15 @@ class SGWInformationManager(object): """ List volumes attached to AWS storage gateway in CACHED or STORAGE mode """ + def list_gateway_volumes(self, gateway): try: - paginator = self.client.get_paginator('list_volumes') + paginator = self.client.get_paginator("list_volumes") response = paginator.paginate( GatewayARN=gateway["gateway_arn"], PaginationConfig={ - 'PageSize': 100, - } + "PageSize": 100, + }, ).build_full_result() gateway["volumes"] = [] @@ -339,10 +336,10 @@ class SGWInformationManager(object): def main(): argument_spec = dict( - gather_local_disks=dict(type='bool', default=True), - gather_tapes=dict(type='bool', default=True), - gather_file_shares=dict(type='bool', default=True), - gather_volumes=dict(type='bool', default=True) + gather_local_disks=dict(type="bool", default=True), + gather_tapes=dict(type="bool", default=True), + gather_file_shares=dict(type="bool", default=True), + gather_volumes=dict(type="bool", default=True), ) module = AnsibleAWSModule( @@ -350,13 +347,13 @@ def main(): supports_check_mode=True, ) - client = 
module.client('storagegateway') + client = module.client("storagegateway") if client is None: # this should never happen - module.fail_json(msg='Unknown error, failed to create storagegateway client, no information available.') + module.fail_json(msg="Unknown error, failed to create storagegateway client, no information available.") SGWInformationManager(client, module).fetch() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/sts_assume_role.py b/ansible_collections/community/aws/plugins/modules/sts_assume_role.py deleted file mode 100644 index 8e5a3b4fe..000000000 --- a/ansible_collections/community/aws/plugins/modules/sts_assume_role.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: sts_assume_role -version_added: 1.0.0 -short_description: Assume a role using AWS Security Token Service and obtain temporary credentials -description: - - Assume a role using AWS Security Token Service and obtain temporary credentials. -author: - - Boris Ekelchik (@bekelchik) - - Marek Piatek (@piontas) -options: - role_arn: - description: - - The Amazon Resource Name (ARN) of the role that the caller is - assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs). - required: true - type: str - role_session_name: - description: - - Name of the role's session - will be used by CloudTrail. - required: true - type: str - policy: - description: - - Supplemental policy to use in addition to assumed role's policies. - type: str - duration_seconds: - description: - - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours). - - The max depends on the IAM role's sessions duration setting. - - By default, the value is set to 3600 seconds. - type: int - external_id: - description: - - A unique identifier that is used by third parties to assume a role in their customers' accounts. - type: str - mfa_serial_number: - description: - - The identification number of the MFA device that is associated with the user who is making the AssumeRole call. - type: str - mfa_token: - description: - - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA. - type: str -notes: - - In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token. 
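# ----------------------------------------------------------------------------
# [Editor's aside] The sts_assume_role module deleted in this hunk was a thin
# wrapper over the STS AssumeRole API. For readers tracking the removal, a
# minimal boto3 equivalent of its core behaviour (function and argument names
# are illustrative assumptions, not part of the diff):
import boto3

def assume_role(role_arn, session_name, duration_seconds=3600):
    sts = boto3.client("sts")
    response = sts.assume_role(
        RoleArn=role_arn,
        RoleSessionName=session_name,
        DurationSeconds=duration_seconds,
    )
    creds = response["Credentials"]
    # The temporary keys can then seed a session for follow-up calls, just as
    # the module's note says to pass them on to later playbook tasks.
    return boto3.Session(
        aws_access_key_id=creds["AccessKeyId"],
        aws_secret_access_key=creds["SecretAccessKey"],
        aws_session_token=creds["SessionToken"],
    )
# ----------------------------------------------------------------------------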
-extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' - -RETURN = ''' -sts_creds: - description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token - returned: always - type: dict - sample: - access_key: XXXXXXXXXXXXXXXXXXXX - expiration: '2017-11-11T11:11:11+00:00' - secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -sts_user: - description: The Amazon Resource Name (ARN) and the assumed role ID - returned: always - type: dict - sample: - assumed_role_id: arn:aws:sts::123456789012:assumed-role/demo/Bob - arn: ARO123EXAMPLE123:Bob -changed: - description: True if obtaining the credentials succeeds - type: bool - returned: always -''' - -EXAMPLES = ''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) -- community.aws.sts_assume_role: - role_arn: "arn:aws:iam::123456789012:role/someRole" - role_session_name: "someRoleSession" - register: assumed_role - -# Use the assumed role above to tag an instance in account 123456789012 -- amazon.aws.ec2_tag: - aws_access_key: "{{ assumed_role.sts_creds.access_key }}" - aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}" - security_token: "{{ assumed_role.sts_creds.session_token }}" - resource: i-xyzxyz01 - state: present - tags: - MyNewTag: value - -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - -try: - from botocore.exceptions import ClientError, ParamValidationError -except ImportError: - pass # caught by AnsibleAWSModule - - -def _parse_response(response): - credentials = response.get('Credentials', {}) - user = response.get('AssumedRoleUser', {}) - - sts_cred = { - 'access_key': credentials.get('AccessKeyId'), - 'secret_key': credentials.get('SecretAccessKey'), - 'session_token': credentials.get('SessionToken'), - 'expiration': credentials.get('Expiration') - - } - sts_user = camel_dict_to_snake_dict(user) - return sts_cred, sts_user - - -def assume_role_policy(connection, module): - params = { - 'RoleArn': module.params.get('role_arn'), - 'RoleSessionName': module.params.get('role_session_name'), - 'Policy': module.params.get('policy'), - 'DurationSeconds': module.params.get('duration_seconds'), - 'ExternalId': module.params.get('external_id'), - 'SerialNumber': module.params.get('mfa_serial_number'), - 'TokenCode': module.params.get('mfa_token') - } - changed = False - - kwargs = dict((k, v) for k, v in params.items() if v is not None) - - try: - response = connection.assume_role(**kwargs) - changed = True - except (ClientError, ParamValidationError) as e: - module.fail_json_aws(e) - - sts_cred, sts_user = _parse_response(response) - module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user) - - -def main(): - argument_spec = dict( - role_arn=dict(required=True), - role_session_name=dict(required=True), - duration_seconds=dict(required=False, default=None, type='int'), - external_id=dict(required=False, default=None), - policy=dict(required=False, default=None), - mfa_serial_number=dict(required=False, default=None), - mfa_token=dict(required=False, default=None, no_log=True) - ) - - module = AnsibleAWSModule(argument_spec=argument_spec) - - 
connection = module.client('sts') - - assume_role_policy(connection, module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/aws/plugins/modules/sts_session_token.py b/ansible_collections/community/aws/plugins/modules/sts_session_token.py index 03df560e9..cb9f99fd3 100644 --- a/ansible_collections/community/aws/plugins/modules/sts_session_token.py +++ b/ansible_collections/community/aws/plugins/modules/sts_session_token.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: sts_session_token version_added: 1.0.0 -short_description: Obtain a session token from the AWS Security Token Service +short_description: obtain a session token from the AWS Security Token Service description: - - Obtain a session token from the AWS Security Token Service. -author: Victor Costan (@pwnall) + - Obtain a session token from the AWS Security Token Service. +author: + - Victor Costan (@pwnall) options: duration_seconds: description: @@ -30,20 +29,21 @@ options: - The value provided by the MFA device, if the trust policy of the user requires MFA. type: str notes: - - In order to use the session token in a following playbook task you must pass the I(access_key), I(access_secret) and I(access_token). + - In order to use the session token in a following playbook task you must pass the I(access_key), + I(secret_key) and I(session_token) parameters to modules that should use the session credentials. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -RETURN = """ +RETURN = r""" sts_creds: description: The Credentials object returned by the AWS Security Token Service returned: always type: list sample: - access_key: ASXXXXXXXXXXXXXXXXXX + access_key: ASIAXXXXXXXXXXXXXXXX expiration: "2016-04-08T11:59:47+00:00" secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX @@ -54,26 +54,27 @@ changed: """ -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
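# ----------------------------------------------------------------------------
# [Editor's aside] For comparison with the playbook examples around this hunk,
# the sts_session_token module's core call maps to a single STS
# GetSessionToken request. A minimal boto3 sketch, adding the MFA parameters
# only when supplied (names and defaults are illustrative assumptions):
import boto3

def session_token(duration_seconds=3600, mfa_serial=None, mfa_code=None):
    sts = boto3.client("sts")
    kwargs = {"DurationSeconds": duration_seconds}
    if mfa_serial is not None:
        kwargs["SerialNumber"] = mfa_serial
        kwargs["TokenCode"] = mfa_code
    return sts.get_session_token(**kwargs)["Credentials"]
# ----------------------------------------------------------------------------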
# (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html) - name: Get a session token community.aws.sts_session_token: + access_key: AKIA1EXAMPLE1EXAMPLE + secret_key: 123456789abcdefghijklmnopqrstuvwxyzABCDE duration_seconds: 3600 register: session_credentials - name: Use the session token obtained above to tag an instance in account 123456789012 amazon.aws.ec2_tag: - aws_access_key: "{{ session_credentials.sts_creds.access_key }}" - aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}" - security_token: "{{ session_credentials.sts_creds.session_token }}" + access_key: "{{ session_credentials.sts_creds.access_key }}" + secret_key: "{{ session_credentials.sts_creds.secret_key }}" + session_token: "{{ session_credentials.sts_creds.session_token }}" resource: i-xyzxyz01 state: present tags: - MyNewTag: value - -''' + MyNewTag: value +""" try: import botocore @@ -81,35 +82,35 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def normalize_credentials(credentials): - access_key = credentials.get('AccessKeyId', None) - secret_key = credentials.get('SecretAccessKey', None) - session_token = credentials.get('SessionToken', None) - expiration = credentials.get('Expiration', None) + access_key = credentials.get("AccessKeyId", None) + secret_key = credentials.get("SecretAccessKey", None) + session_token = credentials.get("SessionToken", None) + expiration = credentials.get("Expiration", None) return { - 'access_key': access_key, - 'secret_key': secret_key, - 'session_token': session_token, - 'expiration': expiration + "access_key": access_key, + "secret_key": secret_key, + "session_token": session_token, + "expiration": expiration, } def get_session_token(connection, module): - duration_seconds = module.params.get('duration_seconds') - mfa_serial_number = module.params.get('mfa_serial_number') - mfa_token = module.params.get('mfa_token') + duration_seconds = module.params.get("duration_seconds") + mfa_serial_number = module.params.get("mfa_serial_number") + mfa_token = module.params.get("mfa_token") changed = False args = {} if duration_seconds is not None: - args['DurationSeconds'] = duration_seconds + args["DurationSeconds"] = duration_seconds if mfa_serial_number is not None: - args['SerialNumber'] = mfa_serial_number + args["SerialNumber"] = mfa_serial_number if mfa_token is not None: - args['TokenCode'] = mfa_token + args["TokenCode"] = mfa_token try: response = connection.get_session_token(**args) @@ -117,13 +118,13 @@ def get_session_token(connection, module): except ClientError as e: module.fail_json(msg=e) - credentials = normalize_credentials(response.get('Credentials', {})) + credentials = normalize_credentials(response.get("Credentials", {})) module.exit_json(changed=changed, sts_creds=credentials) def main(): argument_spec = dict( - duration_seconds=dict(required=False, default=None, type='int'), + duration_seconds=dict(required=False, default=None, type="int"), mfa_serial_number=dict(required=False, default=None), mfa_token=dict(required=False, default=None, no_log=True), ) @@ -131,12 +132,12 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec) try: - connection = module.client('sts') + connection = module.client("sts") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - 
module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") get_session_token(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/waf_condition.py b/ansible_collections/community/aws/plugins/modules/waf_condition.py index 63585d50c..5b08cb6de 100644 --- a/ansible_collections/community/aws/plugins/modules/waf_condition.py +++ b/ansible_collections/community/aws/plugins/modules/waf_condition.py @@ -1,13 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Will Thames # Copyright (c) 2015 Mike Mochan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: waf_condition short_description: Create and delete WAF Conditions version_added: 1.0.0 @@ -20,10 +18,6 @@ description: author: - Will Thames (@willthames) - Mike Mochan (@mmochan) -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: name: @@ -137,77 +131,81 @@ options: - absent default: present type: str -''' -EXAMPLES = r''' - - name: create WAF byte condition - community.aws.waf_condition: - name: my_byte_condition - filters: +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: create WAF byte condition + community.aws.waf_condition: + name: my_byte_condition + filters: - field_to_match: header position: STARTS_WITH target_string: Hello header: Content-type - type: byte - - - name: create WAF geo condition - community.aws.waf_condition: - name: my_geo_condition - filters: - - country: US - - country: AU - - country: AT - type: geo - - - name: create IP address condition - community.aws.waf_condition: - name: "{{ resource_prefix }}_ip_condition" - filters: - - ip_address: "10.0.0.0/8" - - ip_address: "192.168.0.0/24" - type: ip - - - name: create WAF regex condition - community.aws.waf_condition: - name: my_regex_condition - filters: - - field_to_match: query_string - regex_pattern: - name: greetings - regex_strings: - - '[hH]ello' - - '^Hi there' - - '.*Good Day to You' - type: regex - - - name: create WAF size condition - community.aws.waf_condition: - name: my_size_condition - filters: - - field_to_match: query_string - size: 300 - comparison: GT - type: size - - - name: create WAF sql injection condition - community.aws.waf_condition: - name: my_sql_condition - filters: - - field_to_match: query_string - transformation: url_decode - type: sql - - - name: create WAF xss condition - community.aws.waf_condition: - name: my_xss_condition - filters: - - field_to_match: query_string - transformation: url_decode - type: xss - -''' - -RETURN = r''' + type: byte + +- name: create WAF geo condition + community.aws.waf_condition: + name: my_geo_condition + filters: + - country: US + - country: AU + - country: AT + type: geo + +- name: create IP address condition + community.aws.waf_condition: + name: "{{ resource_prefix }}_ip_condition" + filters: + - ip_address: "10.0.0.0/8" + - ip_address: "192.168.0.0/24" + type: ip + +- name: create WAF regex condition + community.aws.waf_condition: + name: my_regex_condition + filters: + - field_to_match: query_string + regex_pattern: + name: greetings + regex_strings: + - '[hH]ello' + - '^Hi there' + - '.*Good Day to You' 
+ type: regex + +- name: create WAF size condition + community.aws.waf_condition: + name: my_size_condition + filters: + - field_to_match: query_string + size: 300 + comparison: GT + type: size + +- name: create WAF sql injection condition + community.aws.waf_condition: + name: my_sql_condition + filters: + - field_to_match: query_string + transformation: url_decode + type: sql + +- name: create WAF xss condition + community.aws.waf_condition: + name: my_xss_condition + filters: + - field_to_match: query_string + transformation: url_decode + type: xss +""" + +RETURN = r""" condition: description: Condition returned by operation. returned: always @@ -397,7 +395,7 @@ condition: description: transformation applied to the text before matching. type: str sample: URL_DECODE -''' +""" try: import botocore @@ -406,85 +404,92 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.waf import MATCH_LOOKUP -from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff from ansible_collections.amazon.aws.plugins.module_utils.waf import get_rule_with_backoff from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -class Condition(object): +class Condition(object): def __init__(self, client, module): self.client = client self.module = module - self.type = module.params['type'] - self.method_suffix = MATCH_LOOKUP[self.type]['method'] - self.conditionset = MATCH_LOOKUP[self.type]['conditionset'] - self.conditionsets = MATCH_LOOKUP[self.type]['conditionset'] + 's' - self.conditionsetid = MATCH_LOOKUP[self.type]['conditionset'] + 'Id' - self.conditiontuple = MATCH_LOOKUP[self.type]['conditiontuple'] - self.conditiontuples = MATCH_LOOKUP[self.type]['conditiontuple'] + 's' - self.conditiontype = MATCH_LOOKUP[self.type]['type'] + self.type = module.params["type"] + self.method_suffix = MATCH_LOOKUP[self.type]["method"] + self.conditionset = MATCH_LOOKUP[self.type]["conditionset"] + self.conditionsets = MATCH_LOOKUP[self.type]["conditionset"] + "s" + self.conditionsetid = MATCH_LOOKUP[self.type]["conditionset"] + "Id" + self.conditiontuple = MATCH_LOOKUP[self.type]["conditiontuple"] + self.conditiontuples = MATCH_LOOKUP[self.type]["conditiontuple"] + "s" + self.conditiontype = MATCH_LOOKUP[self.type]["type"] def format_for_update(self, condition_set_id): # Prep kwargs kwargs = dict() - kwargs['Updates'] = list() + kwargs["Updates"] = list() - for filtr in self.module.params.get('filters'): + for filtr in 
self.module.params.get("filters"): # Only for ip_set - if self.type == 'ip': + if self.type == "ip": # there might be a better way of detecting an IPv6 address - if ':' in filtr.get('ip_address'): - ip_type = 'IPV6' + if ":" in filtr.get("ip_address"): + ip_type = "IPV6" else: - ip_type = 'IPV4' - condition_insert = {'Type': ip_type, 'Value': filtr.get('ip_address')} + ip_type = "IPV4" + condition_insert = {"Type": ip_type, "Value": filtr.get("ip_address")} # Specific for geo_match_set - if self.type == 'geo': - condition_insert = dict(Type='Country', Value=filtr.get('country')) + if self.type == "geo": + condition_insert = dict(Type="Country", Value=filtr.get("country")) # Common For everything but ip_set and geo_match_set - if self.type not in ('ip', 'geo'): - - condition_insert = dict(FieldToMatch=dict(Type=filtr.get('field_to_match').upper()), - TextTransformation=filtr.get('transformation', 'none').upper()) - - if filtr.get('field_to_match').upper() == "HEADER": - if filtr.get('header'): - condition_insert['FieldToMatch']['Data'] = filtr.get('header').lower() + if self.type not in ("ip", "geo"): + condition_insert = dict( + FieldToMatch=dict(Type=filtr.get("field_to_match").upper()), + TextTransformation=filtr.get("transformation", "none").upper(), + ) + + if filtr.get("field_to_match").upper() == "HEADER": + if filtr.get("header"): + condition_insert["FieldToMatch"]["Data"] = filtr.get("header").lower() else: self.module.fail_json(msg=str("DATA required when HEADER requested")) # Specific for byte_match_set - if self.type == 'byte': - condition_insert['TargetString'] = filtr.get('target_string') - condition_insert['PositionalConstraint'] = filtr.get('position') + if self.type == "byte": + condition_insert["TargetString"] = filtr.get("target_string") + condition_insert["PositionalConstraint"] = filtr.get("position") # Specific for size_constraint_set - if self.type == 'size': - condition_insert['ComparisonOperator'] = filtr.get('comparison') - condition_insert['Size'] = filtr.get('size') + if self.type == "size": + condition_insert["ComparisonOperator"] = filtr.get("comparison") + condition_insert["Size"] = filtr.get("size") # Specific for regex_match_set - if self.type == 'regex': - condition_insert['RegexPatternSetId'] = self.ensure_regex_pattern_present(filtr.get('regex_pattern'))['RegexPatternSetId'] + if self.type == "regex": + condition_insert["RegexPatternSetId"] = self.ensure_regex_pattern_present(filtr.get("regex_pattern"))[ + "RegexPatternSetId" + ] - kwargs['Updates'].append({'Action': 'INSERT', self.conditiontuple: condition_insert}) + kwargs["Updates"].append({"Action": "INSERT", self.conditiontuple: condition_insert}) kwargs[self.conditionsetid] = condition_set_id return kwargs def format_for_deletion(self, condition): - return {'Updates': [{'Action': 'DELETE', self.conditiontuple: current_condition_tuple} - for current_condition_tuple in condition[self.conditiontuples]], - self.conditionsetid: condition[self.conditionsetid]} + return { + "Updates": [ + {"Action": "DELETE", self.conditiontuple: current_condition_tuple} + for current_condition_tuple in condition[self.conditiontuples] + ], + self.conditionsetid: condition[self.conditionsetid], + } @AWSRetry.exponential_backoff() def list_regex_patterns_with_backoff(self, **params): @@ -502,60 +507,77 @@ class Condition(object): try: response = self.list_regex_patterns_with_backoff(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not list 
regex patterns') - regex_patterns.extend(response['RegexPatternSets']) - if 'NextMarker' in response: - params['NextMarker'] = response['NextMarker'] + self.module.fail_json_aws(e, msg="Could not list regex patterns") + regex_patterns.extend(response["RegexPatternSets"]) + if "NextMarker" in response: + params["NextMarker"] = response["NextMarker"] else: break return regex_patterns def get_regex_pattern_by_name(self, name): existing_regex_patterns = self.list_regex_patterns() - regex_lookup = dict((item['Name'], item['RegexPatternSetId']) for item in existing_regex_patterns) + regex_lookup = dict((item["Name"], item["RegexPatternSetId"]) for item in existing_regex_patterns) if name in regex_lookup: - return self.get_regex_pattern_set_with_backoff(regex_lookup[name])['RegexPatternSet'] + return self.get_regex_pattern_set_with_backoff(regex_lookup[name])["RegexPatternSet"] else: return None def ensure_regex_pattern_present(self, regex_pattern): - name = regex_pattern['name'] + name = regex_pattern["name"] pattern_set = self.get_regex_pattern_by_name(name) if not pattern_set: - pattern_set = run_func_with_change_token_backoff(self.client, self.module, {'Name': name}, - self.client.create_regex_pattern_set)['RegexPatternSet'] - missing = set(regex_pattern['regex_strings']) - set(pattern_set['RegexPatternStrings']) - extra = set(pattern_set['RegexPatternStrings']) - set(regex_pattern['regex_strings']) + pattern_set = run_func_with_change_token_backoff( + self.client, self.module, {"Name": name}, self.client.create_regex_pattern_set + )["RegexPatternSet"] + missing = set(regex_pattern["regex_strings"]) - set(pattern_set["RegexPatternStrings"]) + extra = set(pattern_set["RegexPatternStrings"]) - set(regex_pattern["regex_strings"]) if not missing and not extra: return pattern_set - updates = [{'Action': 'INSERT', 'RegexPatternString': pattern} for pattern in missing] - updates.extend([{'Action': 'DELETE', 'RegexPatternString': pattern} for pattern in extra]) - run_func_with_change_token_backoff(self.client, self.module, - {'RegexPatternSetId': pattern_set['RegexPatternSetId'], 'Updates': updates}, - self.client.update_regex_pattern_set, wait=True) - return self.get_regex_pattern_set_with_backoff(pattern_set['RegexPatternSetId'])['RegexPatternSet'] + updates = [{"Action": "INSERT", "RegexPatternString": pattern} for pattern in missing] + updates.extend([{"Action": "DELETE", "RegexPatternString": pattern} for pattern in extra]) + run_func_with_change_token_backoff( + self.client, + self.module, + {"RegexPatternSetId": pattern_set["RegexPatternSetId"], "Updates": updates}, + self.client.update_regex_pattern_set, + wait=True, + ) + return self.get_regex_pattern_set_with_backoff(pattern_set["RegexPatternSetId"])["RegexPatternSet"] def delete_unused_regex_pattern(self, regex_pattern_set_id): try: - regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)['RegexPatternSet'] + regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)[ + "RegexPatternSet" + ] updates = list() - for regex_pattern_string in regex_pattern_set['RegexPatternStrings']: - updates.append({'Action': 'DELETE', 'RegexPatternString': regex_pattern_string}) - run_func_with_change_token_backoff(self.client, self.module, - {'RegexPatternSetId': regex_pattern_set_id, 'Updates': updates}, - self.client.update_regex_pattern_set) - - run_func_with_change_token_backoff(self.client, self.module, - {'RegexPatternSetId': regex_pattern_set_id}, - 
self.client.delete_regex_pattern_set, wait=True) - except is_boto3_error_code('WAFNonexistentItemException'): + for regex_pattern_string in regex_pattern_set["RegexPatternStrings"]: + updates.append({"Action": "DELETE", "RegexPatternString": regex_pattern_string}) + run_func_with_change_token_backoff( + self.client, + self.module, + {"RegexPatternSetId": regex_pattern_set_id, "Updates": updates}, + self.client.update_regex_pattern_set, + ) + + run_func_with_change_token_backoff( + self.client, + self.module, + {"RegexPatternSetId": regex_pattern_set_id}, + self.client.delete_regex_pattern_set, + wait=True, + ) + except is_boto3_error_code("WAFNonexistentItemException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg='Could not delete regex pattern') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Could not delete regex pattern") def get_condition_by_name(self, name): - all_conditions = [d for d in self.list_conditions() if d['Name'] == name] + all_conditions = [d for d in self.list_conditions() if d["Name"] == name] if all_conditions: return all_conditions[0][self.conditionsetid] @@ -563,17 +585,17 @@ class Condition(object): def get_condition_by_id_with_backoff(self, condition_set_id): params = dict() params[self.conditionsetid] = condition_set_id - func = getattr(self.client, 'get_' + self.method_suffix) + func = getattr(self.client, "get_" + self.method_suffix) return func(**params)[self.conditionset] def get_condition_by_id(self, condition_set_id): try: return self.get_condition_by_id_with_backoff(condition_set_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not get condition') + self.module.fail_json_aws(e, msg="Could not get condition") def list_conditions(self): - method = 'list_' + self.method_suffix + 's' + method = "list_" + self.method_suffix + "s" try: paginator = self.client.get_paginator(method) func = paginator.paginate().build_full_result @@ -583,66 +605,68 @@ class Condition(object): try: return func()[self.conditionsets] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not list %s conditions' % self.type) + self.module.fail_json_aws(e, msg=f"Could not list {self.type} conditions") def tidy_up_regex_patterns(self, regex_match_set): all_regex_match_sets = self.list_conditions() all_match_set_patterns = list() for rms in all_regex_match_sets: - all_match_set_patterns.extend(conditiontuple['RegexPatternSetId'] - for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples]) + all_match_set_patterns.extend( + conditiontuple["RegexPatternSetId"] + for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples] + ) for filtr in regex_match_set[self.conditiontuples]: - if filtr['RegexPatternSetId'] not in all_match_set_patterns: - self.delete_unused_regex_pattern(filtr['RegexPatternSetId']) + if filtr["RegexPatternSetId"] not in all_match_set_patterns: + self.delete_unused_regex_pattern(filtr["RegexPatternSetId"]) def find_condition_in_rules(self, condition_set_id): rules_in_use = [] try: - if self.client.__class__.__name__ == 'WAF': + if self.client.__class__.__name__ == "WAF": all_rules = list_rules_with_backoff(self.client) - elif 
self.client.__class__.__name__ == 'WAFRegional': + elif self.client.__class__.__name__ == "WAFRegional": all_rules = list_regional_rules_with_backoff(self.client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not list rules') + self.module.fail_json_aws(e, msg="Could not list rules") for rule in all_rules: try: - rule_details = get_rule_with_backoff(self.client, rule['RuleId']) + rule_details = get_rule_with_backoff(self.client, rule["RuleId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not get rule details') - if condition_set_id in [predicate['DataId'] for predicate in rule_details['Predicates']]: - rules_in_use.append(rule_details['Name']) + self.module.fail_json_aws(e, msg="Could not get rule details") + if condition_set_id in [predicate["DataId"] for predicate in rule_details["Predicates"]]: + rules_in_use.append(rule_details["Name"]) return rules_in_use def find_and_delete_condition(self, condition_set_id): current_condition = self.get_condition_by_id(condition_set_id) in_use_rules = self.find_condition_in_rules(condition_set_id) if in_use_rules: - rulenames = ', '.join(in_use_rules) - self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition['Name'], rulenames)) + rulenames = ", ".join(in_use_rules) + self.module.fail_json(msg=f"Condition {current_condition['Name']} is in use by {rulenames}") if current_condition[self.conditiontuples]: # Filters are deleted using update with the DELETE action - func = getattr(self.client, 'update_' + self.method_suffix) + func = getattr(self.client, "update_" + self.method_suffix) params = self.format_for_deletion(current_condition) try: # We do not need to wait for the conditiontuple delete because we wait later for the delete_* call run_func_with_change_token_backoff(self.client, self.module, params, func) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not delete filters from condition') - func = getattr(self.client, 'delete_' + self.method_suffix) + self.module.fail_json_aws(e, msg="Could not delete filters from condition") + func = getattr(self.client, "delete_" + self.method_suffix) params = dict() params[self.conditionsetid] = condition_set_id try: run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not delete condition') + self.module.fail_json_aws(e, msg="Could not delete condition") # tidy up regex patterns - if self.type == 'regex': + if self.type == "regex": self.tidy_up_regex_patterns(current_condition) return True, {} def find_missing(self, update, current_condition): missing = [] - for desired in update['Updates']: + for desired in update["Updates"]: found = False desired_condition = desired[self.conditiontuple] current_conditions = current_condition[self.conditiontuples] @@ -657,39 +681,41 @@ class Condition(object): current_condition = self.get_condition_by_id(condition_set_id) update = self.format_for_update(condition_set_id) missing = self.find_missing(update, current_condition) - if self.module.params.get('purge_filters'): - extra = [{'Action': 'DELETE', self.conditiontuple: current_tuple} - for current_tuple in current_condition[self.conditiontuples] - if current_tuple not in [desired[self.conditiontuple] for desired in 
update['Updates']]] + if self.module.params.get("purge_filters"): + extra = [ + {"Action": "DELETE", self.conditiontuple: current_tuple} + for current_tuple in current_condition[self.conditiontuples] + if current_tuple not in [desired[self.conditiontuple] for desired in update["Updates"]] + ] else: extra = [] changed = bool(missing or extra) if changed: - update['Updates'] = missing + extra - func = getattr(self.client, 'update_' + self.method_suffix) + update["Updates"] = missing + extra + func = getattr(self.client, "update_" + self.method_suffix) try: result = run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not update condition') + self.module.fail_json_aws(e, msg="Could not update condition") return changed, self.get_condition_by_id(condition_set_id) def ensure_condition_present(self): - name = self.module.params['name'] + name = self.module.params["name"] condition_set_id = self.get_condition_by_name(name) if condition_set_id: return self.find_and_update_condition(condition_set_id) else: params = dict() - params['Name'] = name - func = getattr(self.client, 'create_' + self.method_suffix) + params["Name"] = name + func = getattr(self.client, "create_" + self.method_suffix) try: condition = run_func_with_change_token_backoff(self.client, self.module, params, func) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not create condition') + self.module.fail_json_aws(e, msg="Could not create condition") return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid]) def ensure_condition_absent(self): - condition_set_id = self.get_condition_by_name(self.module.params['name']) + condition_set_id = self.get_condition_by_name(self.module.params["name"]) if condition_set_id: return self.find_and_delete_condition(condition_set_id) return False, {} @@ -698,45 +724,46 @@ class Condition(object): def main(): filters_subspec = dict( country=dict(), - field_to_match=dict(choices=['uri', 'query_string', 'header', 'method', 'body']), + field_to_match=dict(choices=["uri", "query_string", "header", "method", "body"]), header=dict(), - transformation=dict(choices=['none', 'compress_white_space', - 'html_entity_decode', 'lowercase', - 'cmd_line', 'url_decode']), - position=dict(choices=['exactly', 'starts_with', 'ends_with', - 'contains', 'contains_word']), - comparison=dict(choices=['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']), + transformation=dict( + choices=["none", "compress_white_space", "html_entity_decode", "lowercase", "cmd_line", "url_decode"] + ), + position=dict(choices=["exactly", "starts_with", "ends_with", "contains", "contains_word"]), + comparison=dict(choices=["EQ", "NE", "LE", "LT", "GE", "GT"]), target_string=dict(), # Bytes - size=dict(type='int'), + size=dict(type="int"), ip_address=dict(), regex_pattern=dict(), ) argument_spec = dict( name=dict(required=True), - type=dict(required=True, choices=['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']), - filters=dict(type='list', elements='dict'), - purge_filters=dict(type='bool', default=False), - waf_regional=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), + type=dict(required=True, choices=["byte", "geo", "ip", "regex", "size", "sql", "xss"]), + filters=dict(type="list", elements="dict"), + purge_filters=dict(type="bool", default=False), 
+ waf_regional=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[["state", "present", ["filters"]]], ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['filters']]]) - state = module.params.get('state') + state = module.params.get("state") - resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + resource = "waf" if not module.params["waf_regional"] else "waf-regional" client = module.client(resource) condition = Condition(client, module) - if state == 'present': + if state == "present": (changed, results) = condition.ensure_condition_present() # return a condition agnostic ID for use by waf_rule - results['ConditionId'] = results[condition.conditionsetid] + results["ConditionId"] = results[condition.conditionsetid] else: (changed, results) = condition.ensure_condition_absent() module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/waf_info.py b/ansible_collections/community/aws/plugins/modules/waf_info.py index 6a49a886e..711d1d8de 100644 --- a/ansible_collections/community/aws/plugins/modules/waf_info.py +++ b/ansible_collections/community/aws/plugins/modules/waf_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: waf_info short_description: Retrieve information for WAF ACLs, Rules, Conditions and Filters version_added: 1.0.0 @@ -29,12 +27,12 @@ author: - Mike Mochan (@mmochan) - Will Thames (@willthames) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: obtain all WAF information community.aws.waf_info: @@ -46,9 +44,9 @@ EXAMPLES = ''' community.aws.waf_info: name: test_waf waf_regional: true -''' +""" -RETURN = ''' +RETURN = r""" wafs: description: The WAFs that match the passed arguments. 
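# ----------------------------------------------------------------------------
# [Editor's aside] waf_info's main() below lists all classic WAF web ACLs and
# optionally filters them by name before fetching each ACL's details. A
# minimal standalone sketch of that flow with boto3, paging on NextMarker;
# function and variable names are illustrative assumptions.
import boto3

def web_acls_by_name(name=None, waf_regional=False):
    client = boto3.client("waf-regional" if waf_regional else "waf")
    acls = []
    marker = None
    while True:
        kwargs = {"Limit": 100}
        if marker:
            kwargs["NextMarker"] = marker
        response = client.list_web_acls(**kwargs)
        acls.extend(response.get("WebACLs", []))
        marker = response.get("NextMarker")
        if not marker or not response.get("WebACLs"):
            break
    if name is not None:
        acls = [acl for acl in acls if acl["Name"] == name]
    return [client.get_web_acl(WebACLId=acl["WebACLId"])["WebACL"] for acl in acls]
# ----------------------------------------------------------------------------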
 returned: success
@@ -114,31 +112,31 @@ wafs:
             "type": "ByteMatch"
           }
         ]
-'''
+"""
+
+from ansible_collections.amazon.aws.plugins.module_utils.waf import get_web_acl
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls
 
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls, get_web_acl
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 
 
 def main():
     argument_spec = dict(
         name=dict(required=False),
-        waf_regional=dict(type='bool', default=False)
+        waf_regional=dict(type="bool", default=False),
     )
     module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-    resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+    resource = "waf" if not module.params["waf_regional"] else "waf-regional"
     client = module.client(resource)
 
     web_acls = list_web_acls(client, module)
-    name = module.params['name']
+    name = module.params["name"]
     if name:
-        web_acls = [web_acl for web_acl in web_acls if
-                    web_acl['Name'] == name]
+        web_acls = [web_acl for web_acl in web_acls if web_acl["Name"] == name]
         if not web_acls:
-            module.fail_json(msg="WAF named %s not found" % name)
-    module.exit_json(wafs=[get_web_acl(client, module, web_acl['WebACLId'])
-                           for web_acl in web_acls])
+            module.fail_json(msg=f"WAF named {name} not found")
+    module.exit_json(wafs=[get_web_acl(client, module, web_acl["WebACLId"]) for web_acl in web_acls])
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/community/aws/plugins/modules/waf_rule.py b/ansible_collections/community/aws/plugins/modules/waf_rule.py
index a994b1831..87a02bbbd 100644
--- a/ansible_collections/community/aws/plugins/modules/waf_rule.py
+++ b/ansible_collections/community/aws/plugins/modules/waf_rule.py
@@ -1,13 +1,11 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Will Thames
 # Copyright (c) 2015 Mike Mochan
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 module: waf_rule
 short_description: Create and delete WAF Rules
 version_added: 1.0.0
@@ -20,10 +18,6 @@ description:
 author:
   - Mike Mochan (@mmochan)
   - Will Thames (@willthames)
-extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
-  - amazon.aws.boto3
 
 options:
   name:
@@ -71,30 +65,35 @@ options:
     default: false
     required: false
     type: bool
-'''
-
-EXAMPLES = r'''
-  - name: create WAF rule
-    community.aws.waf_rule:
-      name: my_waf_rule
-      conditions:
-        - name: my_regex_condition
-          type: regex
-          negated: false
-        - name: my_geo_condition
-          type: geo
-          negated: false
-        - name: my_byte_condition
-          type: byte
-          negated: true
-
-  - name: remove WAF rule
-    community.aws.waf_rule:
-      name: "my_waf_rule"
-      state: absent
-'''
-
-RETURN = r'''
+
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: create WAF rule
+  community.aws.waf_rule:
+    name: my_waf_rule
+    conditions:
+      - name: my_regex_condition
+        type: regex
+        negated: false
+      - name: my_geo_condition
+        type: geo
+        negated: false
+      - name: my_byte_condition
+        type: byte
+        negated: true
+
+- name: remove WAF rule
+  community.aws.waf_rule:
+    name: "my_waf_rule"
+    state: absent
+"""
+
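For context on the RETURN payload documented next: each entry under a rule's conditions ultimately becomes one element of the Updates list that this module passes to the WAF classic update_rule API, via the format_for_insertion/format_for_deletion helpers further down in this diff. A minimal sketch of that payload shape, with hypothetical placeholder IDs (not taken from this patch):

    # Sketch only: the Updates entry waf_rule builds per condition change.
    # Both IDs below are illustrative placeholders.
    def predicate_update(action, negated, predicate_type, data_id):
        return {
            "Action": action,  # "INSERT" attaches a condition, "DELETE" detaches it
            "Predicate": {
                "Negated": negated,      # mirrors the playbook's `negated` flag
                "Type": predicate_type,  # e.g. "GeoMatch" or "ByteMatch"
                "DataId": data_id,       # ID of an existing condition set
            },
        }

    update = {
        "RuleId": "11111111-2222-3333-4444-555555555555",
        "Updates": [predicate_update("INSERT", False, "GeoMatch", "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")],
    }

Keeping INSERT and DELETE entries in one Updates list is what lets the module reconcile a rule's whole condition set in a single change-token transaction.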
+RETURN = r""" rule: description: WAF rule contents returned: always @@ -135,7 +134,7 @@ rule: returned: always type: str sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261 -''' +""" import re @@ -144,62 +143,62 @@ try: except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.waf import ( - MATCH_LOOKUP, - list_regional_rules_with_backoff, - list_rules_with_backoff, - run_func_with_change_token_backoff, - get_web_acl_with_backoff, - list_web_acls_with_backoff, - list_regional_web_acls_with_backoff, -) +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.waf import MATCH_LOOKUP +from ansible_collections.amazon.aws.plugins.module_utils.waf import get_web_acl_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_web_acls_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_rule_by_name(client, module, name): - rules = [d['RuleId'] for d in list_rules(client, module) if d['Name'] == name] + rules = [d["RuleId"] for d in list_rules(client, module) if d["Name"] == name] if rules: return rules[0] def get_rule(client, module, rule_id): try: - return client.get_rule(RuleId=rule_id)['Rule'] + return client.get_rule(RuleId=rule_id)["Rule"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not get WAF rule') + module.fail_json_aws(e, msg="Could not get WAF rule") def list_rules(client, module): - if client.__class__.__name__ == 'WAF': + if client.__class__.__name__ == "WAF": try: return list_rules_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list WAF rules') - elif client.__class__.__name__ == 'WAFRegional': + module.fail_json_aws(e, msg="Could not list WAF rules") + elif client.__class__.__name__ == "WAFRegional": try: return list_regional_rules_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list WAF Regional rules') + module.fail_json_aws(e, msg="Could not list WAF Regional rules") def list_regional_rules(client, module): try: return list_regional_rules_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list WAF rules') + module.fail_json_aws(e, msg="Could not list WAF rules") def find_and_update_rule(client, module, rule_id): rule = get_rule(client, module, rule_id) - rule_id = rule['RuleId'] + rule_id = rule["RuleId"] existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) all_conditions = 
dict() for condition_type in MATCH_LOOKUP: - method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's' + method = "list_" + MATCH_LOOKUP[condition_type]["method"] + "s" all_conditions[condition_type] = dict() try: paginator = client.get_paginator(method) @@ -209,125 +208,133 @@ def find_and_update_rule(client, module, rule_id): # and throw different exceptions func = getattr(client, method) try: - pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's'] + pred_results = func()[MATCH_LOOKUP[condition_type]["conditionset"] + "s"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type) + module.fail_json_aws(e, msg=f"Could not list {condition_type} conditions") for pred in pred_results: - pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id'] - all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred) - all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred) + pred["DataId"] = pred[MATCH_LOOKUP[condition_type]["conditionset"] + "Id"] + all_conditions[condition_type][pred["Name"]] = camel_dict_to_snake_dict(pred) + all_conditions[condition_type][pred["DataId"]] = camel_dict_to_snake_dict(pred) - for condition in module.params['conditions']: - desired_conditions[condition['type']][condition['name']] = condition + for condition in module.params["conditions"]: + desired_conditions[condition["type"]][condition["name"]] = condition - reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items()) - for condition in rule['Predicates']: - existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition) + reverse_condition_types = dict((v["type"], k) for (k, v) in MATCH_LOOKUP.items()) + for condition in rule["Predicates"]: + existing_conditions[reverse_condition_types[condition["Type"]]][condition["DataId"]] = camel_dict_to_snake_dict( + condition + ) insertions = list() deletions = list() for condition_type in desired_conditions: - for (condition_name, condition) in desired_conditions[condition_type].items(): + for condition_name, condition in desired_conditions[condition_type].items(): if condition_name not in all_conditions[condition_type]: - module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type)) - condition['data_id'] = all_conditions[condition_type][condition_name]['data_id'] - if condition['data_id'] not in existing_conditions[condition_type]: + module.fail_json(msg=f"Condition {condition_name} of type {condition_type} does not exist") + condition["data_id"] = all_conditions[condition_type][condition_name]["data_id"] + if condition["data_id"] not in existing_conditions[condition_type]: insertions.append(format_for_insertion(condition)) - if module.params['purge_conditions']: + if module.params["purge_conditions"]: for condition_type in existing_conditions: - deletions.extend([format_for_deletion(condition) for condition in existing_conditions[condition_type].values() - if not all_conditions[condition_type][condition['data_id']]['name'] in desired_conditions[condition_type]]) + deletions.extend( + [ + format_for_deletion(condition) + for condition in existing_conditions[condition_type].values() + if not all_conditions[condition_type][condition["data_id"]]["name"] + in desired_conditions[condition_type] + ] + ) changed = bool(insertions or deletions) - update = { - 'RuleId': rule_id, - 
'Updates': insertions + deletions - } + update = {"RuleId": rule_id, "Updates": insertions + deletions} if changed: try: run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not update rule conditions') + module.fail_json_aws(e, msg="Could not update rule conditions") return changed, get_rule(client, module, rule_id) def format_for_insertion(condition): - return dict(Action='INSERT', - Predicate=dict(Negated=condition['negated'], - Type=MATCH_LOOKUP[condition['type']]['type'], - DataId=condition['data_id'])) + return dict( + Action="INSERT", + Predicate=dict( + Negated=condition["negated"], Type=MATCH_LOOKUP[condition["type"]]["type"], DataId=condition["data_id"] + ), + ) def format_for_deletion(condition): - return dict(Action='DELETE', - Predicate=dict(Negated=condition['negated'], - Type=condition['type'], - DataId=condition['data_id'])) + return dict( + Action="DELETE", + Predicate=dict(Negated=condition["negated"], Type=condition["type"], DataId=condition["data_id"]), + ) def remove_rule_conditions(client, module, rule_id): - conditions = get_rule(client, module, rule_id)['Predicates'] + conditions = get_rule(client, module, rule_id)["Predicates"] updates = [format_for_deletion(camel_dict_to_snake_dict(condition)) for condition in conditions] try: - run_func_with_change_token_backoff(client, module, {'RuleId': rule_id, 'Updates': updates}, client.update_rule) + run_func_with_change_token_backoff(client, module, {"RuleId": rule_id, "Updates": updates}, client.update_rule) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not remove rule conditions') + module.fail_json_aws(e, msg="Could not remove rule conditions") def ensure_rule_present(client, module): - name = module.params['name'] + name = module.params["name"] rule_id = get_rule_by_name(client, module, name) params = dict() if rule_id: return find_and_update_rule(client, module, rule_id) else: - params['Name'] = module.params['name'] - metric_name = module.params['metric_name'] + params["Name"] = module.params["name"] + metric_name = module.params["metric_name"] if not metric_name: - metric_name = re.sub(r'[^a-zA-Z0-9]', '', module.params['name']) - params['MetricName'] = metric_name + metric_name = re.sub(r"[^a-zA-Z0-9]", "", module.params["name"]) + params["MetricName"] = metric_name try: - new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule'] + new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)["Rule"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not create rule') - return find_and_update_rule(client, module, new_rule['RuleId']) + module.fail_json_aws(e, msg="Could not create rule") + return find_and_update_rule(client, module, new_rule["RuleId"]) def find_rule_in_web_acls(client, module, rule_id): web_acls_in_use = [] try: - if client.__class__.__name__ == 'WAF': + if client.__class__.__name__ == "WAF": all_web_acls = list_web_acls_with_backoff(client) - elif client.__class__.__name__ == 'WAFRegional': + elif client.__class__.__name__ == "WAFRegional": all_web_acls = list_regional_web_acls_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list Web ACLs') + 
module.fail_json_aws(e, msg="Could not list Web ACLs") for web_acl in all_web_acls: try: - web_acl_details = get_web_acl_with_backoff(client, web_acl['WebACLId']) + web_acl_details = get_web_acl_with_backoff(client, web_acl["WebACLId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not get Web ACL details') - if rule_id in [rule['RuleId'] for rule in web_acl_details['Rules']]: - web_acls_in_use.append(web_acl_details['Name']) + module.fail_json_aws(e, msg="Could not get Web ACL details") + if rule_id in [rule["RuleId"] for rule in web_acl_details["Rules"]]: + web_acls_in_use.append(web_acl_details["Name"]) return web_acls_in_use def ensure_rule_absent(client, module): - rule_id = get_rule_by_name(client, module, module.params['name']) + rule_id = get_rule_by_name(client, module, module.params["name"]) in_use_web_acls = find_rule_in_web_acls(client, module, rule_id) if in_use_web_acls: - web_acl_names = ', '.join(in_use_web_acls) - module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" % - (module.params['name'], web_acl_names)) + web_acl_names = ", ".join(in_use_web_acls) + module.fail_json(msg=f"Rule {module.params['name']} is in use by Web ACL(s) {web_acl_names}") if rule_id: remove_rule_conditions(client, module, rule_id) try: - return True, run_func_with_change_token_backoff(client, module, {'RuleId': rule_id}, client.delete_rule, wait=True) + return True, run_func_with_change_token_backoff( + client, module, {"RuleId": rule_id}, client.delete_rule, wait=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not delete rule') + module.fail_json_aws(e, msg="Could not delete rule") return False, {} @@ -335,17 +342,17 @@ def main(): argument_spec = dict( name=dict(required=True), metric_name=dict(), - state=dict(default='present', choices=['present', 'absent']), - conditions=dict(type='list', elements='dict'), - purge_conditions=dict(type='bool', default=False), - waf_regional=dict(type='bool', default=False), + state=dict(default="present", choices=["present", "absent"]), + conditions=dict(type="list", elements="dict"), + purge_conditions=dict(type="bool", default=False), + waf_regional=dict(type="bool", default=False), ) module = AnsibleAWSModule(argument_spec=argument_spec) - state = module.params.get('state') + state = module.params.get("state") - resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + resource = "waf" if not module.params["waf_regional"] else "waf-regional" client = module.client(resource) - if state == 'present': + if state == "present": (changed, results) = ensure_rule_present(client, module) else: (changed, results) = ensure_rule_absent(client, module) @@ -353,5 +360,5 @@ def main(): module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/waf_web_acl.py b/ansible_collections/community/aws/plugins/modules/waf_web_acl.py index 9d5ad59e4..021ca568d 100644 --- a/ansible_collections/community/aws/plugins/modules/waf_web_acl.py +++ b/ansible_collections/community/aws/plugins/modules/waf_web_acl.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) 
-__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: waf_web_acl short_description: Create and delete WAF Web ACLs version_added: 1.0.0 @@ -19,10 +17,6 @@ description: author: - Mike Mochan (@mmochan) - Will Thames (@willthames) -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: name: @@ -85,27 +79,32 @@ options: default: false required: false type: bool -''' - -EXAMPLES = r''' - - name: create web ACL - community.aws.waf_web_acl: - name: my_web_acl - rules: - - name: my_rule - priority: 1 - action: block - default_action: block - purge_rules: true - state: present - - - name: delete the web acl - community.aws.waf_web_acl: - name: my_web_acl - state: absent -''' - -RETURN = r''' + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: create web ACL + community.aws.waf_web_acl: + name: my_web_acl + rules: + - name: my_rule + priority: 1 + action: block + default_action: block + purge_rules: true + state: present + +- name: delete the web acl + community.aws.waf_web_acl: + name: my_web_acl + state: absent +""" + +RETURN = r""" web_acl: description: contents of the Web ACL. returned: always @@ -158,29 +157,29 @@ web_acl: returned: always type: str sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c -''' +""" + +import re try: import botocore except ImportError: pass # handled by AnsibleAWSModule -import re +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_web_acls_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.waf import ( - list_regional_rules_with_backoff, - list_regional_web_acls_with_backoff, - list_rules_with_backoff, - list_web_acls_with_backoff, - run_func_with_change_token_backoff, -) + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_web_acl_by_name(client, module, name): - acls = [d['WebACLId'] for d in list_web_acls(client, module) if d['Name'] == name] + acls = [d["WebACLId"] for d in list_web_acls(client, module) if d["Name"] == name] if acls: return acls[0] else: @@ -188,91 +187,93 @@ def get_web_acl_by_name(client, module, name): def create_rule_lookup(client, module): - if client.__class__.__name__ == 'WAF': + if client.__class__.__name__ == "WAF": try: rules = list_rules_with_backoff(client) - return dict((rule['Name'], rule) for rule in rules) + return dict((rule["Name"], rule) for rule in rules) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list rules') - elif client.__class__.__name__ == 'WAFRegional': + module.fail_json_aws(e, msg="Could not list rules") + elif 
client.__class__.__name__ == "WAFRegional": try: rules = list_regional_rules_with_backoff(client) - return dict((rule['Name'], rule) for rule in rules) + return dict((rule["Name"], rule) for rule in rules) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list regional rules') + module.fail_json_aws(e, msg="Could not list regional rules") def get_web_acl(client, module, web_acl_id): try: - return client.get_web_acl(WebACLId=web_acl_id)['WebACL'] + return client.get_web_acl(WebACLId=web_acl_id)["WebACL"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not get Web ACL with id %s' % web_acl_id) + module.fail_json_aws(e, msg=f"Could not get Web ACL with id {web_acl_id}") -def list_web_acls(client, module,): - if client.__class__.__name__ == 'WAF': +def list_web_acls( + client, + module, +): + if client.__class__.__name__ == "WAF": try: return list_web_acls_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not get Web ACLs') - elif client.__class__.__name__ == 'WAFRegional': + module.fail_json_aws(e, msg="Could not get Web ACLs") + elif client.__class__.__name__ == "WAFRegional": try: return list_regional_web_acls_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not get Web ACLs') + module.fail_json_aws(e, msg="Could not get Web ACLs") def find_and_update_web_acl(client, module, web_acl_id): acl = get_web_acl(client, module, web_acl_id) rule_lookup = create_rule_lookup(client, module) - existing_rules = acl['Rules'] - desired_rules = [{'RuleId': rule_lookup[rule['name']]['RuleId'], - 'Priority': rule['priority'], - 'Action': {'Type': rule['action'].upper()}, - 'Type': rule.get('type', 'regular').upper()} - for rule in module.params['rules']] + existing_rules = acl["Rules"] + desired_rules = [ + { + "RuleId": rule_lookup[rule["name"]]["RuleId"], + "Priority": rule["priority"], + "Action": {"Type": rule["action"].upper()}, + "Type": rule.get("type", "regular").upper(), + } + for rule in module.params["rules"] + ] missing = [rule for rule in desired_rules if rule not in existing_rules] extras = [] - if module.params['purge_rules']: + if module.params["purge_rules"]: extras = [rule for rule in existing_rules if rule not in desired_rules] - insertions = [format_for_update(rule, 'INSERT') for rule in missing] - deletions = [format_for_update(rule, 'DELETE') for rule in extras] + insertions = [format_for_update(rule, "INSERT") for rule in missing] + deletions = [format_for_update(rule, "DELETE") for rule in extras] changed = bool(insertions + deletions) # Purge rules before adding new ones in case a deletion shares the same # priority as an insertion. 
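# An aside on the delete-then-insert ordering the comment above describes: a
# replacement rule may reuse the priority of a rule being removed, so the module
# issues two separate update_web_acl calls, each wrapped in
# run_func_with_change_token_backoff and awaited via the change_token_in_sync
# waiter. A rough, illustrative shape (names as in the surrounding code):
#
#     params = {"WebACLId": acl["WebACLId"], "DefaultAction": acl["DefaultAction"]}
#     params["Updates"] = deletions    # phase 1: drop displaced rules
#     result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
#     get_waiter(client, "change_token_in_sync").wait(ChangeToken=result["ChangeToken"])
#     params["Updates"] = insertions   # phase 2: add the replacements
#     run_func_with_change_token_backoff(client, module, params, client.update_web_acl)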
- params = { - 'WebACLId': acl['WebACLId'], - 'DefaultAction': acl['DefaultAction'] - } + params = {"WebACLId": acl["WebACLId"], "DefaultAction": acl["DefaultAction"]} change_tokens = [] if deletions: try: - params['Updates'] = deletions + params["Updates"] = deletions result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl) - change_tokens.append(result['ChangeToken']) + change_tokens.append(result["ChangeToken"]) get_waiter( - client, 'change_token_in_sync', - ).wait( - ChangeToken=result['ChangeToken'] - ) + client, + "change_token_in_sync", + ).wait(ChangeToken=result["ChangeToken"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not update Web ACL') + module.fail_json_aws(e, msg="Could not update Web ACL") if insertions: try: - params['Updates'] = insertions + params["Updates"] = insertions result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl) - change_tokens.append(result['ChangeToken']) + change_tokens.append(result["ChangeToken"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not update Web ACL') + module.fail_json_aws(e, msg="Could not update Web ACL") if change_tokens: for token in change_tokens: get_waiter( - client, 'change_token_in_sync', - ).wait( - ChangeToken=token - ) + client, + "change_token_in_sync", + ).wait(ChangeToken=token) if changed: acl = get_web_acl(client, module, web_acl_id) return changed, acl @@ -282,77 +283,79 @@ def format_for_update(rule, action): return dict( Action=action, ActivatedRule=dict( - Priority=rule['Priority'], - RuleId=rule['RuleId'], - Action=dict( - Type=rule['Action']['Type'] - ) - ) + Priority=rule["Priority"], + RuleId=rule["RuleId"], + Action=dict(Type=rule["Action"]["Type"]), + ), ) def remove_rules_from_web_acl(client, module, web_acl_id): acl = get_web_acl(client, module, web_acl_id) - deletions = [format_for_update(rule, 'DELETE') for rule in acl['Rules']] + deletions = [format_for_update(rule, "DELETE") for rule in acl["Rules"]] try: - params = {'WebACLId': acl['WebACLId'], 'DefaultAction': acl['DefaultAction'], 'Updates': deletions} + params = {"WebACLId": acl["WebACLId"], "DefaultAction": acl["DefaultAction"], "Updates": deletions} run_func_with_change_token_backoff(client, module, params, client.update_web_acl) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not remove rule') + module.fail_json_aws(e, msg="Could not remove rule") def ensure_web_acl_present(client, module): changed = False result = None - name = module.params['name'] + name = module.params["name"] web_acl_id = get_web_acl_by_name(client, module, name) if web_acl_id: (changed, result) = find_and_update_web_acl(client, module, web_acl_id) else: - metric_name = module.params['metric_name'] + metric_name = module.params["metric_name"] if not metric_name: - metric_name = re.sub(r'[^A-Za-z0-9]', '', module.params['name']) - default_action = module.params['default_action'].upper() + metric_name = re.sub(r"[^A-Za-z0-9]", "", module.params["name"]) + default_action = module.params["default_action"].upper() try: - params = {'Name': name, 'MetricName': metric_name, 'DefaultAction': {'Type': default_action}} + params = {"Name": name, "MetricName": metric_name, "DefaultAction": {"Type": default_action}} new_web_acl = run_func_with_change_token_backoff(client, module, params, 
client.create_web_acl) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not create Web ACL') - (changed, result) = find_and_update_web_acl(client, module, new_web_acl['WebACL']['WebACLId']) + module.fail_json_aws(e, msg="Could not create Web ACL") + (changed, result) = find_and_update_web_acl(client, module, new_web_acl["WebACL"]["WebACLId"]) return changed, result def ensure_web_acl_absent(client, module): - web_acl_id = get_web_acl_by_name(client, module, module.params['name']) + web_acl_id = get_web_acl_by_name(client, module, module.params["name"]) if web_acl_id: web_acl = get_web_acl(client, module, web_acl_id) - if web_acl['Rules']: + if web_acl["Rules"]: remove_rules_from_web_acl(client, module, web_acl_id) try: - run_func_with_change_token_backoff(client, module, {'WebACLId': web_acl_id}, client.delete_web_acl, wait=True) + run_func_with_change_token_backoff( + client, module, {"WebACLId": web_acl_id}, client.delete_web_acl, wait=True + ) return True, {} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not delete Web ACL') + module.fail_json_aws(e, msg="Could not delete Web ACL") return False, {} def main(): argument_spec = dict( name=dict(required=True), - default_action=dict(choices=['block', 'allow', 'count']), + default_action=dict(choices=["block", "allow", "count"]), metric_name=dict(), - state=dict(default='present', choices=['present', 'absent']), - rules=dict(type='list', elements='dict'), - purge_rules=dict(type='bool', default=False), - waf_regional=dict(type='bool', default=False) + state=dict(default="present", choices=["present", "absent"]), + rules=dict(type="list", elements="dict"), + purge_rules=dict(type="bool", default=False), + waf_regional=dict(type="bool", default=False), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[["state", "present", ["default_action", "rules"]]], ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['default_action', 'rules']]]) - state = module.params.get('state') + state = module.params.get("state") - resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + resource = "waf" if not module.params["waf_regional"] else "waf-regional" client = module.client(resource) - if state == 'present': + if state == "present": (changed, results) = ensure_web_acl_present(client, module) else: (changed, results) = ensure_web_acl_absent(client, module) @@ -360,5 +363,5 @@ def main(): module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py index 7a9011e9b..b96ba0cb1 100644 --- a/ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py +++ b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_ip_set version_added: 1.5.0 @@ -63,14 +62,13 @@ notes: - Support for I(purge_tags) was added in release 4.0.0. 
extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: test ip set wafv2_ip_set: name: test02 @@ -84,9 +82,9 @@ EXAMPLES = ''' tags: A: B C: D -''' +""" -RETURN = """ +RETURN = r""" addresses: description: Current addresses of the ip set sample: @@ -117,13 +115,16 @@ name: """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags @@ -137,41 +138,36 @@ class IpSet: self.existing_set, self.id, self.locktoken, self.arn = self.get_set() def description(self): - return self.existing_set.get('Description') + return self.existing_set.get("Description") def _format_set(self, ip_set): if ip_set is None: return None - return camel_dict_to_snake_dict(self.existing_set, ignore_list=['tags']) + return camel_dict_to_snake_dict(self.existing_set, ignore_list=["tags"]) def get(self): return self._format_set(self.existing_set) def remove(self): try: - response = self.wafv2.delete_ip_set( - Name=self.name, - Scope=self.scope, - Id=self.id, - LockToken=self.locktoken - ) + response = self.wafv2.delete_ip_set(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to remove wafv2 ip set.") return {} def create(self, description, ip_address_version, addresses, tags): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'IPAddressVersion': ip_address_version, - 'Addresses': addresses, + "Name": self.name, + "Scope": self.scope, + "IPAddressVersion": ip_address_version, + "Addresses": addresses, } if description: - req_obj['Description'] = description + req_obj["Description"] = description if tags: - req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags) + req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) try: response = self.wafv2.create_ip_set(**req_obj) @@ -183,15 +179,15 @@ class IpSet: def update(self, description, addresses): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'Id': self.id, - 'Addresses': addresses, - 'LockToken': self.locktoken + "Name": self.name, + "Scope": self.scope, + "Id": self.id, + "Addresses": addresses, + "LockToken": self.locktoken, } if description: - req_obj['Description'] = description + req_obj["Description"] = description try: response = self.wafv2.update_ip_set(**req_obj) @@ -207,38 +203,31 @@ class IpSet: id = None arn = None locktoken = None - for item in response.get('IPSets'): - if item.get('Name') == self.name: - id = item.get('Id') - locktoken = 
item.get('LockToken') - arn = item.get('ARN') + for item in response.get("IPSets"): + if item.get("Name") == self.name: + id = item.get("Id") + locktoken = item.get("LockToken") + arn = item.get("ARN") if id: try: - existing_set = self.wafv2.get_ip_set( - Name=self.name, - Scope=self.scope, - Id=id - ).get('IPSet') + existing_set = self.wafv2.get_ip_set(Name=self.name, Scope=self.scope, Id=id).get("IPSet") except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 ip set.") tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws) - existing_set['tags'] = tags + existing_set["tags"] = tags return existing_set, id, locktoken, arn def list(self, Nextmarker=None): # there is currently no paginator for wafv2 - req_obj = { - 'Scope': self.scope, - 'Limit': 100 - } + req_obj = {"Scope": self.scope, "Limit": 100} if Nextmarker: - req_obj['NextMarker'] = Nextmarker + req_obj["NextMarker"] = Nextmarker try: response = self.wafv2.list_ip_sets(**req_obj) - if response.get('NextMarker'): - response['IPSets'] += self.list(Nextmarker=response.get('NextMarker')).get('IPSets') + if response.get("NextMarker"): + response["IPSets"] += self.list(Nextmarker=response.get("NextMarker")).get("IPSets") except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to list wafv2 ip set.") @@ -248,11 +237,11 @@ class IpSet: def compare(existing_set, addresses, purge_addresses, state): diff = False new_rules = [] - existing_rules = existing_set.get('addresses') - if state == 'present': + existing_rules = existing_set.get("addresses") + if state == "present": if purge_addresses: new_rules = addresses - if sorted(addresses) != sorted(existing_set.get('addresses')): + if sorted(addresses) != sorted(existing_set.get("addresses")): diff = True else: @@ -274,23 +263,22 @@ def compare(existing_set, addresses, purge_addresses, state): def main(): - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']), - description=dict(type='str'), - ip_address_version=dict(type='str', choices=['IPV4', 'IPV6']), - addresses=dict(type='list', elements='str'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - purge_addresses=dict(type='bool', default=True), + state=dict(type="str", required=True, choices=["present", "absent"]), + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), + description=dict(type="str"), + ip_address_version=dict(type="str", choices=["IPV4", "IPV6"]), + addresses=dict(type="list", elements="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + purge_addresses=dict(type="bool", default=True), ) module = AnsibleAWSModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[['state', 'present', ['ip_address_version', 'addresses']]] + required_if=[["state", "present", ["ip_address_version", "addresses"]]], ) state = module.params.get("state") @@ -304,17 +292,18 @@ def main(): purge_addresses = module.params.get("purge_addresses") check_mode = module.check_mode - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") change = False retval = {} ip_set = IpSet(wafv2, name, scope, module.fail_json_aws) - if state == 'present': - + if state == "present": if ip_set.get(): - tags_updated = ensure_wafv2_tags(wafv2, ip_set.arn, tags, purge_tags, 
module.fail_json_aws, module.check_mode) + tags_updated = ensure_wafv2_tags( + wafv2, ip_set.arn, tags, purge_tags, module.fail_json_aws, module.check_mode + ) ips_updated, addresses = compare(ip_set.get(), addresses, purge_addresses, state) description_updated = bool(description) and ip_set.description() != description change = ips_updated or description_updated or tags_updated @@ -322,32 +311,23 @@ def main(): if module.check_mode: pass elif ips_updated or description_updated: - retval = ip_set.update( - description=description, - addresses=addresses - ) + retval = ip_set.update(description=description, addresses=addresses) elif tags_updated: retval, id, locktoken, arn = ip_set.get_set() else: if not check_mode: retval = ip_set.create( - description=description, - ip_address_version=ip_address_version, - addresses=addresses, - tags=tags + description=description, ip_address_version=ip_address_version, addresses=addresses, tags=tags ) change = True - if state == 'absent': + if state == "absent": if ip_set.get(): if addresses: if len(addresses) > 0: change, addresses = compare(ip_set.get(), addresses, purge_addresses, state) if change and not check_mode: - retval = ip_set.update( - description=description, - addresses=addresses - ) + retval = ip_set.update(description=description, addresses=addresses) else: if not check_mode: retval = ip_set.remove() @@ -356,5 +336,5 @@ def main(): module.exit_json(changed=change, **retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py index b92c9a816..caca5cd70 100644 --- a/ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py +++ b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_ip_set_info version_added: 1.5.0 @@ -28,20 +27,19 @@ options: type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: test ip set wafv2_ip_set_info: name: test02 scope: REGIONAL -''' +""" -RETURN = """ +RETURN = r""" addresses: description: Current addresses of the ip set sample: @@ -72,28 +70,29 @@ name: """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None): # there is currently no paginator for wafv2 - req_obj = { - 'Scope': scope, - 'Limit': 100 - } + req_obj = 
{"Scope": scope, "Limit": 100} if Nextmarker: - req_obj['NextMarker'] = Nextmarker + req_obj["NextMarker"] = Nextmarker try: response = wafv2.list_ip_sets(**req_obj) - if response.get('NextMarker'): - response['IPSets'] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get('NextMarker')).get('IPSets') + if response.get("NextMarker"): + response["IPSets"] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get("NextMarker")).get( + "IPSets" + ) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to list wafv2 ip set") return response @@ -101,21 +100,15 @@ def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None): def get_ip_set(wafv2, name, scope, id, fail_json_aws): try: - response = wafv2.get_ip_set( - Name=name, - Scope=scope, - Id=id - ) + response = wafv2.get_ip_set(Name=name, Scope=scope, Id=id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 ip set") return response def main(): - arg_spec = dict( - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + name=dict(type="str", required=True), scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]) ) module = AnsibleAWSModule( @@ -126,26 +119,26 @@ def main(): name = module.params.get("name") scope = module.params.get("scope") - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") # check if ip set exist response = list_ip_sets(wafv2, scope, module.fail_json_aws) id = None - for item in response.get('IPSets'): - if item.get('Name') == name: - id = item.get('Id') - arn = item.get('ARN') + for item in response.get("IPSets"): + if item.get("Name") == name: + id = item.get("Id") + arn = item.get("ARN") retval = {} existing_set = None if id: existing_set = get_ip_set(wafv2, name, scope, id, module.fail_json_aws) - retval = camel_dict_to_snake_dict(existing_set.get('IPSet')) - retval['tags'] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {} + retval = camel_dict_to_snake_dict(existing_set.get("IPSet")) + retval["tags"] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {} module.exit_json(**retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_resources.py b/ansible_collections/community/aws/plugins/modules/wafv2_resources.py index 527ee1087..b36f51712 100644 --- a/ansible_collections/community/aws/plugins/modules/wafv2_resources.py +++ b/ansible_collections/community/aws/plugins/modules/wafv2_resources.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_resources version_added: 1.5.0 @@ -37,22 +36,21 @@ options: required: true extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: add test alb to waf string03 community.aws.wafv2_resources: name: string03 scope: REGIONAL state: present arn: "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933" -''' +""" -RETURN = """ +RETURN = r""" resource_arns: description: Current resources where the wafv2 is applied on sample: @@ -62,22 
+60,20 @@ resource_arns: """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls def get_web_acl(wafv2, name, scope, id, fail_json_aws): try: - response = wafv2.get_web_acl( - Name=name, - Scope=scope, - Id=id - ) + response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response @@ -85,9 +81,7 @@ def get_web_acl(wafv2, name, scope, id, fail_json_aws): def list_wafv2_resources(wafv2, arn, fail_json_aws): try: - response = wafv2.list_resources_for_web_acl( - WebACLArn=arn - ) + response = wafv2.list_resources_for_web_acl(WebACLArn=arn) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to list wafv2 web acl.") return response @@ -95,10 +89,7 @@ def list_wafv2_resources(wafv2, arn, fail_json_aws): def add_wafv2_resources(wafv2, waf_arn, arn, fail_json_aws): try: - response = wafv2.associate_web_acl( - WebACLArn=waf_arn, - ResourceArn=arn - ) + response = wafv2.associate_web_acl(WebACLArn=waf_arn, ResourceArn=arn) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to add wafv2 web acl.") return response @@ -106,27 +97,24 @@ def add_wafv2_resources(wafv2, waf_arn, arn, fail_json_aws): def remove_resources(wafv2, arn, fail_json_aws): try: - response = wafv2.disassociate_web_acl( - ResourceArn=arn - ) + response = wafv2.disassociate_web_acl(ResourceArn=arn) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to remove wafv2 web acl.") return response def main(): - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - name=dict(type='str'), - scope=dict(type='str', choices=['CLOUDFRONT', 'REGIONAL']), - arn=dict(type='str', required=True) + state=dict(type="str", required=True, choices=["present", "absent"]), + name=dict(type="str"), + scope=dict(type="str", choices=["CLOUDFRONT", "REGIONAL"]), + arn=dict(type="str", required=True), ) module = AnsibleAWSModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[['state', 'present', ['name', 'scope']]] + required_if=[["state", "present", ["name", "scope"]]], ) state = module.params.get("state") @@ -135,7 +123,7 @@ def main(): arn = module.params.get("arn") check_mode = module.check_mode - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") # check if web acl exists @@ -145,26 +133,26 @@ def main(): retval = {} change = False - for item in response.get('WebACLs'): - if item.get('Name') == name: - id = item.get('Id') + for item in response.get("WebACLs"): + if item.get("Name") == name: + id = item.get("Id") if id: existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) - waf_arn = existing_acl.get('WebACL').get('ARN') + waf_arn = existing_acl.get("WebACL").get("ARN") retval = list_wafv2_resources(wafv2, waf_arn, module.fail_json_aws) - if state == 'present': + if state == 
"present": if retval: - if arn not in retval.get('ResourceArns'): + if arn not in retval.get("ResourceArns"): change = True if not check_mode: retval = add_wafv2_resources(wafv2, waf_arn, arn, module.fail_json_aws) - elif state == 'absent': + elif state == "absent": if retval: - if arn in retval.get('ResourceArns'): + if arn in retval.get("ResourceArns"): change = True if not check_mode: retval = remove_resources(wafv2, arn, module.fail_json_aws) @@ -172,5 +160,5 @@ def main(): module.exit_json(changed=change, **camel_dict_to_snake_dict(retval)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py index 3a2a7b5dd..5cafee1f6 100644 --- a/ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py +++ b/ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_resources_info version_added: 1.5.0 @@ -28,20 +27,19 @@ options: type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: get web acl community.aws.wafv2_resources_info: name: string03 scope: REGIONAL -''' +""" -RETURN = """ +RETURN = r""" resource_arns: description: Current resources where the wafv2 is applied on sample: @@ -51,22 +49,20 @@ resource_arns: """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls def get_web_acl(wafv2, name, scope, id, fail_json_aws): try: - response = wafv2.get_web_acl( - Name=name, - Scope=scope, - Id=id - ) + response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response @@ -78,19 +74,16 @@ def list_web_acls(wafv2, scope, fail_json_aws): def list_wafv2_resources(wafv2, arn, fail_json_aws): try: - response = wafv2.list_resources_for_web_acl( - WebACLArn=arn - ) + response = wafv2.list_resources_for_web_acl(WebACLArn=arn) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to list wafv2 resources.") return response def main(): - arg_spec = dict( - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), ) module = AnsibleAWSModule( @@ -101,25 +94,25 @@ def main(): name = 
module.params.get("name") scope = module.params.get("scope") - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") # check if web acl exists response = list_web_acls(wafv2, scope, module.fail_json_aws) id = None retval = {} - for item in response.get('WebACLs'): - if item.get('Name') == name: - id = item.get('Id') + for item in response.get("WebACLs"): + if item.get("Name") == name: + id = item.get("Id") if id: existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) - arn = existing_acl.get('WebACL').get('ARN') + arn = existing_acl.get("WebACL").get("ARN") retval = camel_dict_to_snake_dict(list_wafv2_resources(wafv2, arn, module.fail_json_aws)) module.exit_json(**retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py index 8e46853c8..e2a7fd1d4 100644 --- a/ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py +++ b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_rule_group version_added: 1.5.0 @@ -67,14 +66,13 @@ options: type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.tags -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: change description community.aws.wafv2_rule_group: name: test02 @@ -150,9 +148,9 @@ EXAMPLES = ''' A: B C: D register: out -''' +""" -RETURN = """ +RETURN = r""" arn: description: Rule group arn sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7 @@ -200,19 +198,22 @@ visibility_config: """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags from 
ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict class RuleGroup: @@ -226,20 +227,20 @@ class RuleGroup: def update(self, description, rules, sampled_requests, cloudwatch_metrics, metric_name): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'Id': self.id, - 'Rules': rules, - 'LockToken': self.locktoken, - 'VisibilityConfig': { - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name - } + "Name": self.name, + "Scope": self.scope, + "Id": self.id, + "Rules": rules, + "LockToken": self.locktoken, + "VisibilityConfig": { + "SampledRequestsEnabled": sampled_requests, + "CloudWatchMetricsEnabled": cloudwatch_metrics, + "MetricName": metric_name, + }, } if description: - req_obj['Description'] = description + req_obj["Description"] = description try: response = self.wafv2.update_rule_group(**req_obj) @@ -251,11 +252,11 @@ class RuleGroup: if self.id is None: response = self.list() - for item in response.get('RuleGroups'): - if item.get('Name') == self.name: - self.id = item.get('Id') - self.locktoken = item.get('LockToken') - self.arn = item.get('ARN') + for item in response.get("RuleGroups"): + if item.get("Name") == self.name: + self.id = item.get("Id") + self.locktoken = item.get("LockToken") + self.arn = item.get("ARN") return self.refresh_group() @@ -263,18 +264,14 @@ class RuleGroup: existing_group = None if self.id: try: - response = self.wafv2.get_rule_group( - Name=self.name, - Scope=self.scope, - Id=self.id - ) - existing_group = response.get('RuleGroup') - self.locktoken = response.get('LockToken') + response = self.wafv2.get_rule_group(Name=self.name, Scope=self.scope, Id=self.id) + existing_group = response.get("RuleGroup") + self.locktoken = response.get("LockToken") except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 rule group.") tags = describe_wafv2_tags(self.wafv2, self.arn, self.fail_json_aws) - existing_group['tags'] = tags or {} + existing_group["tags"] = tags or {} return existing_group @@ -289,10 +286,7 @@ class RuleGroup: def remove(self): try: response = self.wafv2.delete_rule_group( - Name=self.name, - Scope=self.scope, - Id=self.id, - LockToken=self.locktoken + Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken ) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to delete wafv2 rule group.") @@ -300,22 +294,22 @@ class RuleGroup: def create(self, capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'Capacity': capacity, - 'Rules': rules, - 'VisibilityConfig': { - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name - } + "Name": self.name, + "Scope": self.scope, + "Capacity": capacity, + "Rules": rules, + "VisibilityConfig": { + "SampledRequestsEnabled": sampled_requests, + "CloudWatchMetricsEnabled": cloudwatch_metrics, + "MetricName": metric_name, + }, } if description: - req_obj['Description'] = description + req_obj["Description"] = description if tags: - req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags) + req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) try: response = 
self.wafv2.create_rule_group(**req_obj) @@ -328,26 +322,25 @@ class RuleGroup: def main(): - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']), - capacity=dict(type='int'), - description=dict(type='str'), - rules=dict(type='list', elements='dict'), - sampled_requests=dict(type='bool', default=False), - cloudwatch_metrics=dict(type='bool', default=True), - metric_name=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), - purge_rules=dict(default=True, type='bool'), + state=dict(type="str", required=True, choices=["present", "absent"]), + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), + capacity=dict(type="int"), + description=dict(type="str"), + rules=dict(type="list", elements="dict"), + sampled_requests=dict(type="bool", default=False), + cloudwatch_metrics=dict(type="bool", default=True), + metric_name=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + purge_rules=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[['state', 'present', ['capacity', 'rules']]] + required_if=[["state", "present", ["capacity", "rules"]]], ) state = module.params.get("state") @@ -372,31 +365,26 @@ def main(): if not metric_name: metric_name = name - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") rule_group = RuleGroup(wafv2, name, scope, module.fail_json_aws) change = False retval = {} - if state == 'present': + if state == "present": if rule_group.get(): - tagging_change = ensure_wafv2_tags(wafv2, rule_group.arn, tags, purge_tags, - module.fail_json_aws, module.check_mode) - rules_change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state) - description_change = bool(description) and (rule_group.get().get('Description') != description) + tagging_change = ensure_wafv2_tags( + wafv2, rule_group.arn, tags, purge_tags, module.fail_json_aws, module.check_mode + ) + rules_change, rules = compare_priority_rules(rule_group.get().get("Rules"), rules, purge_rules, state) + description_change = bool(description) and (rule_group.get().get("Description") != description) change = tagging_change or rules_change or description_change retval = rule_group.get() if module.check_mode: # In check mode nothing changes... 
pass elif rules_change or description_change: - retval = rule_group.update( - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name - ) + retval = rule_group.update(description, rules, sampled_requests, cloudwatch_metrics, metric_name) elif tagging_change: retval = rule_group.refresh_group() @@ -404,35 +392,25 @@ def main(): change = True if not check_mode: retval = rule_group.create( - capacity, - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name, - tags + capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags ) - elif state == 'absent': + elif state == "absent": if rule_group.get(): if rules: if len(rules) > 0: - change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state) + change, rules = compare_priority_rules(rule_group.get().get("Rules"), rules, purge_rules, state) if change and not check_mode: retval = rule_group.update( - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name + description, rules, sampled_requests, cloudwatch_metrics, metric_name ) else: change = True if not check_mode: retval = rule_group.remove() - module.exit_json(changed=change, **camel_dict_to_snake_dict(retval, ignore_list=['tags'])) + module.exit_json(changed=change, **camel_dict_to_snake_dict(retval, ignore_list=["tags"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py index a42bea0c2..58862a9a5 100644 --- a/ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py +++ b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_rule_group_info version_added: 1.5.0 @@ -15,11 +14,6 @@ short_description: wafv2_rule_group_info description: - Get information about existing wafv2 rule groups. options: - state: - description: - - This option does nothing, has been deprecated, and will be removed in a release after 2022-12-01. - required: false - type: str name: description: - The name of the rule group.
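Throughout the wafv2 modules in this patch, every read hands back a LockToken and every mutating call has to echo it; that token is WAFv2's optimistic-locking guard against concurrent writers. A minimal boto3 sketch of the handshake the RuleGroup class above wraps (illustrative only, not module code; the group name and scope are placeholders, and pagination via NextMarker is ignored):

import boto3

wafv2 = boto3.client("wafv2", region_name="us-east-1")
NAME, SCOPE = "example-rule-group", "REGIONAL"  # placeholders

# every list/get response carries the current LockToken for each group
summary = next(
    (g for g in wafv2.list_rule_groups(Scope=SCOPE)["RuleGroups"] if g["Name"] == NAME),
    None,
)
if summary:
    # get_rule_group re-reads the group and hands back a fresh LockToken
    found = wafv2.get_rule_group(Name=NAME, Scope=SCOPE, Id=summary["Id"])
    # a mutating call must echo that token back; a stale token is rejected
    # with WAFOptimisticLockException, so the caller re-reads and retries
    wafv2.update_rule_group(
        Name=NAME,
        Scope=SCOPE,
        Id=summary["Id"],
        Rules=found["RuleGroup"].get("Rules", []),
        VisibilityConfig=found["RuleGroup"]["VisibilityConfig"],
        LockToken=found["LockToken"],
    )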
@@ -33,21 +27,19 @@ options: type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: rule group info community.aws.wafv2_rule_group_info: name: test02 - state: present scope: REGIONAL -''' +""" -RETURN = """ +RETURN = r""" arn: description: Rule group arn sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7 @@ -95,23 +87,21 @@ visibility_config: """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups def get_rule_group(wafv2, name, scope, id, fail_json_aws): try: - response = wafv2.get_rule_group( - Name=name, - Scope=scope, - Id=id - ) + response = wafv2.get_rule_group(Name=name, Scope=scope, Id=id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 rule group.") return response @@ -119,46 +109,39 @@ def get_rule_group(wafv2, name, scope, id, fail_json_aws): def main(): arg_spec = dict( - state=dict(type='str', required=False), - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), ) module = AnsibleAWSModule( argument_spec=arg_spec, - supports_check_mode=True + supports_check_mode=True, ) - state = module.params.get("state") name = module.params.get("name") scope = module.params.get("scope") - wafv2 = module.client('wafv2') - - if state: - module.deprecate( - 'The state parameter does nothing, has been deprecated, and will be removed in a future release.', - version='6.0.0', collection_name='community.aws') + wafv2 = module.client("wafv2") # check if rule group exists response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws) id = None retval = {} - for item in response.get('RuleGroups'): - if item.get('Name') == name: - id = item.get('Id') - arn = item.get('ARN') + for item in response.get("RuleGroups"): + if item.get("Name") == name: + id = item.get("Id") + arn = item.get("ARN") existing_group = None if id: existing_group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws) - retval = camel_dict_to_snake_dict(existing_group.get('RuleGroup')) + retval = camel_dict_to_snake_dict(existing_group.get("RuleGroup")) tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) - retval['tags'] = tags or {} + retval["tags"] = tags or {} module.exit_json(**retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py 
b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py index f91fe64e6..054c093c5 100644 --- a/ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py +++ b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_web_acl version_added: 1.5.0 @@ -89,7 +88,6 @@ options: - A map of custom response keys and content bodies. Define response bodies here and reference them in the rules by providing - the key of the body dictionary element. - Each element must have a unique dict key and in the dict two keys for I(content_type) and I(content). - - Requires botocore >= 1.20.40 type: dict version_added: 3.1.0 purge_rules: @@ -102,14 +100,13 @@ notes: - Support for the I(purge_tags) parameter was added in release 4.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create test web acl community.aws.wafv2_web_acl: name: test05 @@ -249,10 +246,9 @@ EXAMPLES = ''' content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }' region: us-east-1 state: present +""" -''' - -RETURN = """ +RETURN = r""" arn: description: web acl arn sample: arn:aws:wafv2:eu-central-1:123456789012:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61 @@ -315,14 +311,17 @@ visibility_config: """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags @@ -338,26 +337,35 @@ class WebACL: self.fail_json_aws = fail_json_aws self.existing_acl, self.id, self.locktoken = self.get_web_acl() - def update(self, default_action, description, rules, sampled_requests, cloudwatch_metrics, metric_name, custom_response_bodies): + def update( + self, + default_action, + description, + rules, + sampled_requests, + cloudwatch_metrics, + metric_name, + custom_response_bodies, + ): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'Id': self.id, - 'DefaultAction': 
default_action, - 'Rules': rules, - 'VisibilityConfig': { - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name + "Name": self.name, + "Scope": self.scope, + "Id": self.id, + "DefaultAction": default_action, + "Rules": rules, + "VisibilityConfig": { + "SampledRequestsEnabled": sampled_requests, + "CloudWatchMetricsEnabled": cloudwatch_metrics, + "MetricName": metric_name, }, - 'LockToken': self.locktoken + "LockToken": self.locktoken, } if description: - req_obj['Description'] = description + req_obj["Description"] = description if custom_response_bodies: - req_obj['CustomResponseBodies'] = custom_response_bodies + req_obj["CustomResponseBodies"] = custom_response_bodies try: response = self.wafv2.update_web_acl(**req_obj) @@ -369,12 +377,7 @@ class WebACL: def remove(self): try: - response = self.wafv2.delete_web_acl( - Name=self.name, - Scope=self.scope, - Id=self.id, - LockToken=self.locktoken - ) + response = self.wafv2.delete_web_acl(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to remove wafv2 web acl.") return response @@ -391,47 +394,53 @@ class WebACL: existing_acl = None response = self.list() - for item in response.get('WebACLs'): - if item.get('Name') == self.name: - id = item.get('Id') - locktoken = item.get('LockToken') - arn = item.get('ARN') + for item in response.get("WebACLs"): + if item.get("Name") == self.name: + id = item.get("Id") + locktoken = item.get("LockToken") + arn = item.get("ARN") if id: try: - existing_acl = self.wafv2.get_web_acl( - Name=self.name, - Scope=self.scope, - Id=id - ) + existing_acl = self.wafv2.get_web_acl(Name=self.name, Scope=self.scope, Id=id) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 web acl.") tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws) - existing_acl['tags'] = tags + existing_acl["tags"] = tags return existing_acl, id, locktoken def list(self): return wafv2_list_web_acls(self.wafv2, self.scope, self.fail_json_aws) - def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, metric_name, tags, description, custom_response_bodies): + def create( + self, + default_action, + rules, + sampled_requests, + cloudwatch_metrics, + metric_name, + tags, + description, + custom_response_bodies, + ): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'DefaultAction': default_action, - 'Rules': rules, - 'VisibilityConfig': { - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name - } + "Name": self.name, + "Scope": self.scope, + "DefaultAction": default_action, + "Rules": rules, + "VisibilityConfig": { + "SampledRequestsEnabled": sampled_requests, + "CloudWatchMetricsEnabled": cloudwatch_metrics, + "MetricName": metric_name, + }, } if custom_response_bodies: - req_obj['CustomResponseBodies'] = custom_response_bodies + req_obj["CustomResponseBodies"] = custom_response_bodies if description: - req_obj['Description'] = description + req_obj["Description"] = description if tags: - req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags) + req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) try: response = self.wafv2.create_web_acl(**req_obj) @@ -443,7 +452,6 @@ class WebACL: def format_result(result): - # We were returning details of the Web ACL inside a "web_acl" parameter on # creation, keep returning it to avoid breaking 
existing playbooks, but also # return what the docs said we return (and returned when no change happened) @@ -451,31 +459,30 @@ def format_result(result): if "WebACL" in retval: retval.update(retval["WebACL"]) - return camel_dict_to_snake_dict(retval, ignore_list=['tags']) + return camel_dict_to_snake_dict(retval, ignore_list=["tags"]) def main(): - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']), - description=dict(type='str'), - default_action=dict(type='str', choices=['Block', 'Allow']), - rules=dict(type='list', elements='dict'), - sampled_requests=dict(type='bool', default=False), - cloudwatch_metrics=dict(type='bool', default=True), - metric_name=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), - custom_response_bodies=dict(type='dict'), - purge_rules=dict(default=True, type='bool'), + state=dict(type="str", required=True, choices=["present", "absent"]), + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), + description=dict(type="str"), + default_action=dict(type="str", choices=["Block", "Allow"]), + rules=dict(type="list", elements="dict"), + sampled_requests=dict(type="bool", default=False), + cloudwatch_metrics=dict(type="bool", default=True), + metric_name=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + custom_response_bodies=dict(type="dict"), + purge_rules=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[['state', 'present', ['default_action', 'rules']]] + required_if=[["state", "present", ["default_action", "rules"]]], ) state = module.params.get("state") @@ -494,16 +501,15 @@ def main(): custom_response_bodies = module.params.get("custom_response_bodies") if custom_response_bodies: - module.require_botocore_at_least('1.20.40', reason='to set custom response bodies') custom_response_bodies = {} for custom_name, body in module.params.get("custom_response_bodies").items(): custom_response_bodies[custom_name] = snake_dict_to_camel_dict(body, capitalize_first=True) - if default_action == 'Block': - default_action = {'Block': {}} - elif default_action == 'Allow': - default_action = {'Allow': {}} + if default_action == "Block": + default_action = {"Block": {}} + elif default_action == "Allow": + default_action = {"Allow": {}} if rules: rules = [] @@ -513,17 +519,19 @@ def main(): if not metric_name: metric_name = name - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") web_acl = WebACL(wafv2, name, scope, module.fail_json_aws) change = False retval = {} - if state == 'present': + if state == "present": if web_acl.get(): - tags_changed = ensure_wafv2_tags(wafv2, web_acl.get().get('WebACL').get('ARN'), tags, purge_tags, module.fail_json_aws, module.check_mode) - change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state) - change = change or (description and web_acl.get().get('WebACL').get('Description') != description) - change = change or (default_action and web_acl.get().get('WebACL').get('DefaultAction') != default_action) + tags_changed = ensure_wafv2_tags( + wafv2, web_acl.get().get("WebACL").get("ARN"), tags, purge_tags, module.fail_json_aws, module.check_mode + ) + change, rules = 
compare_priority_rules(web_acl.get().get("WebACL").get("Rules"), rules, purge_rules, state) + change = change or (description and web_acl.get().get("WebACL").get("Description") != description) + change = change or (default_action and web_acl.get().get("WebACL").get("DefaultAction") != default_action) if change and not check_mode: retval = web_acl.update( @@ -533,7 +541,7 @@ def main(): sampled_requests, cloudwatch_metrics, metric_name, - custom_response_bodies + custom_response_bodies, ) elif tags_changed: retval, id, locktoken = web_acl.get_web_acl() @@ -553,14 +561,16 @@ def main(): metric_name, tags, description, - custom_response_bodies + custom_response_bodies, ) - elif state == 'absent': + elif state == "absent": if web_acl.get(): if rules: if len(rules) > 0: - change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state) + change, rules = compare_priority_rules( + web_acl.get().get("WebACL").get("Rules"), rules, purge_rules, state + ) if change and not check_mode: retval = web_acl.update( default_action, @@ -569,7 +579,7 @@ def main(): sampled_requests, cloudwatch_metrics, metric_name, - custom_response_bodies + custom_response_bodies, ) else: change = True @@ -579,5 +589,5 @@ def main(): module.exit_json(changed=change, **format_result(retval)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py index 13be05db5..e3cdc46e3 100644 --- a/ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py +++ b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_web_acl_info version_added: 1.5.0 @@ -28,21 +27,20 @@ options: type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: get web acl community.aws.wafv2_web_acl_info: name: test05 scope: REGIONAL register: out -''' +""" -RETURN = """ +RETURN = r""" arn: description: web acl arn sample: arn:aws:wafv2:eu-central-1:11111111:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61 @@ -91,33 +89,30 @@ visibility_config: """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls def get_web_acl(wafv2, name, scope, id, fail_json_aws): try: - response = wafv2.get_web_acl( - Name=name, - 
Scope=scope, - Id=id - ) + response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response def main(): - arg_spec = dict( - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), ) module = AnsibleAWSModule( @@ -129,7 +124,7 @@ def main(): name = module.params.get("name") scope = module.params.get("scope") - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") # check if web acl exists response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws) @@ -137,19 +132,19 @@ def main(): arn = None retval = {} - for item in response.get('WebACLs'): - if item.get('Name') == name: - id = item.get('Id') - arn = item.get('ARN') + for item in response.get("WebACLs"): + if item.get("Name") == name: + id = item.get("Id") + arn = item.get("ARN") if id: existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) - retval = camel_dict_to_snake_dict(existing_acl.get('WebACL')) + retval = camel_dict_to_snake_dict(existing_acl.get("WebACL")) tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) - retval['tags'] = tags + retval["tags"] = tags module.exit_json(**retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/aws/pyproject.toml b/ansible_collections/community/aws/pyproject.toml new file mode 100644 index 000000000..a3810fdc1 --- /dev/null +++ b/ansible_collections/community/aws/pyproject.toml @@ -0,0 +1,43 @@ +[tool.black] +skip-string-normalization = false +line-length = 120 +target-version = ['py37', 'py38'] +extend-exclude = ''' +/( + | plugins/module_utils/_version.py +)/ +''' + +[tool.darker] +revision = "origin/main.." 
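The mechanical churn visible across this whole patch -- single quotes becoming double quotes, multi-line call sites collapsing to 120 columns, combined imports split one per line -- is the output of the formatting stack configured in this new pyproject.toml: black, darker for incremental application, and isort with force_single_line plus the custom sections declared below. As an illustration (a hypothetical module header, not taken from any one file), isort under this configuration sorts imports into five separated groups:

import json  # stdlib

from botocore.exceptions import BotoCoreError  # third party (known_third_party)
from botocore.exceptions import ClientError

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict  # ansible-core

from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list  # amazon.aws

from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags  # community.aws

The force_single_line setting is why lines like "from botocore.exceptions import ClientError, BotoCoreError" are split into one import per line everywhere in this diff.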
+ +src = [ + "plugins", + "tests/unit", + "tests/integration", +] + +[tool.isort] +profile = "black" +force_single_line = true +line_length = 120 + +src_paths = [ + "plugins", + "tests/unit", + "tests/integration", +] + +# Unstable +skip = [ + "aws_ssm.py" +] + +sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "ANSIBLE_CORE", "ANSIBLE_AMAZON_AWS", "ANSIBLE_COMMUNITY_AWS", "LOCALFOLDER"] +known_third_party = ["botocore", "boto3"] +known_ansible_core = ["ansible"] +known_ansible_amazon_aws = ["ansible_collections.amazon.aws"] +known_ansible_community_aws = ["ansible_collections.community.aws"] + +[tool.flynt] +transform-joins = true diff --git a/ansible_collections/community/aws/requirements.txt b/ansible_collections/community/aws/requirements.txt index 0a1981f46..cd474e3b6 100644 --- a/ansible_collections/community/aws/requirements.txt +++ b/ansible_collections/community/aws/requirements.txt @@ -2,5 +2,5 @@ # - tests/unit/constraints.txt # - tests/integration/constraints.txt # - tests/integration/targets/setup_botocore_pip -botocore>=1.21.0 -boto3>=1.18.0 +botocore>=1.29.0 +boto3>=1.26.0 diff --git a/ansible_collections/community/aws/test-requirements.txt b/ansible_collections/community/aws/test-requirements.txt index 82ab3b8c6..03e59f596 100644 --- a/ansible_collections/community/aws/test-requirements.txt +++ b/ansible_collections/community/aws/test-requirements.txt @@ -10,12 +10,12 @@ pytest pytest-forked pytest-mock pytest-xdist +pytest-ansible ; python_version >= '3.7' +git+https://github.com/ansible-community/pytest-ansible-units.git ; python_version < '3.7' # Needed for ansible.utils.ipaddr in tests netaddr # Sometimes needed where we don't have features we need in modules awscli -# Used for comparing SSH Public keys to the Amazon fingerprints -pycrypto -# Used by ec2_win_password +# Used for comparing SSH Public keys to the Amazon fingerprints and ec2_win_password cryptography diff --git a/ansible_collections/community/aws/tests/config.yml b/ansible_collections/community/aws/tests/config.yml index 5112f7268..8d053169d 100644 --- a/ansible_collections/community/aws/tests/config.yml +++ b/ansible_collections/community/aws/tests/config.yml @@ -1,2 +1,2 @@ modules: - python_requires: '>=3.6' + python_requires: '>=3.7' diff --git a/ansible_collections/community/aws/tests/integration/constraints.txt b/ansible_collections/community/aws/tests/integration/constraints.txt index cd546e7c2..f388e1f90 100644 --- a/ansible_collections/community/aws/tests/integration/constraints.txt +++ b/ansible_collections/community/aws/tests/integration/constraints.txt @@ -1,7 +1,11 @@ # Specifically run tests against the oldest versions that we support -boto3==1.18.0 -botocore==1.21.0 +botocore==1.29.0 +boto3==1.26.0 # AWS CLI has `botocore==` dependencies, provide the one that matches botocore # to avoid needing to download over a years worth of awscli wheels. -awscli==1.20.0 +awscli==1.27.0 + +# AWS CLI depends on PyYAML <5.5,>=3.10; the latest PyYAML release in that range, 5.4.1, fails to install. 
+# Use a version in that range that is known to work (https://github.com/yaml/pyyaml/issues/736) +PyYAML==5.3.1 diff --git a/ansible_collections/community/aws/tests/integration/requirements.txt b/ansible_collections/community/aws/tests/integration/requirements.txt index 352e8b7ff..aa71c9681 100644 --- a/ansible_collections/community/aws/tests/integration/requirements.txt +++ b/ansible_collections/community/aws/tests/integration/requirements.txt @@ -8,6 +8,6 @@ virtualenv # Sometimes needed where we don't have features we need in modules awscli # Used for comparing SSH Public keys to the Amazon fingerprints -pycrypto +cryptography # Used by ec2_asg_scheduled_action python-dateutil diff --git a/ansible_collections/community/aws/tests/integration/requirements.yml b/ansible_collections/community/aws/tests/integration/requirements.yml new file mode 100644 index 000000000..d3e5b3032 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/requirements.yml @@ -0,0 +1,8 @@ +--- +collections: + - name: https://github.com/ansible-collections/amazon.aws.git + type: git + version: main + - ansible.windows + - community.crypto + - community.general diff --git a/ansible_collections/community/aws/tests/integration/targets/accessanalyzer_validate_policy_info/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/accessanalyzer_validate_policy_info/tasks/main.yml index 857a7c1b4..811ef9fb5 100644 --- a/ansible_collections/community/aws/tests/integration/targets/accessanalyzer_validate_policy_info/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/accessanalyzer_validate_policy_info/tasks/main.yml @@ -1,10 +1,10 @@ --- - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key | default(omit) }}' - aws_secret_key: '{{ aws_secret_key | default(omit) }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' block: - name: get ARN of calling user diff --git a/ansible_collections/community/aws/tests/integration/targets/acm_certificate/tasks/full_acm_test.yml b/ansible_collections/community/aws/tests/integration/targets/acm_certificate/tasks/full_acm_test.yml index 5cbd156dd..4c45db05e 100644 --- a/ansible_collections/community/aws/tests/integration/targets/acm_certificate/tasks/full_acm_test.yml +++ b/ansible_collections/community/aws/tests/integration/targets/acm_certificate/tasks/full_acm_test.yml @@ -2,15 +2,15 @@ module_defaults: group/aws: aws_region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' block: - name: list certs - aws_acm_info: null + acm_certificate_info: null register: list_all - name: list certs with check mode - aws_acm_info: null + acm_certificate_info: null register: list_all_check check_mode: yes # read-only task, should work the same as with no - name: check certificate listing worked @@ -20,12 +20,12 @@ - list_all_check.certificates is defined - list_all.certificates == list_all_check.certificates - name: ensure absent cert which doesn't exist - first time - aws_acm: + acm_certificate: name_tag: '{{ item.name }}' state: absent with_items: '{{ local_certs }}' - name: ensure 
absent cert which doesn't exist - second time - aws_acm: + acm_certificate: name_tag: '{{ item[0].name }}' state: absent check_mode: '{{ item[1] }}' @@ -39,7 +39,7 @@ - not item.changed with_items: "{{ absent_start_two.results }}" - name: list cert which shouldn't exist - aws_acm_info: + acm_certificate_info: tags: Name: '{{ item.name }}' register: list_tag @@ -75,7 +75,7 @@ privatekey_path: '{{ item.priv_key }}' selfsigned_digest: sha256 - name: upload certificate with check mode - aws_acm: + acm_certificate: name_tag: '{{ item.name }}' certificate: '{{ lookup(''file'', item.cert ) }}' private_key: '{{ lookup(''file'', item.priv_key ) }}' @@ -84,7 +84,7 @@ register: upload_check with_items: '{{ local_certs }}' - name: check whether cert was uploaded in check mode - aws_acm_info: + acm_certificate_info: tags: Name: '{{ item.name }}' register: list_after_check_mode_upload @@ -96,7 +96,7 @@ - upload_check.changed - (item.certificates | length) == 0 - name: upload certificates first time - aws_acm: + acm_certificate: name_tag: '{{ item.name }}' certificate: '{{ lookup(''file'', item.cert ) }}' private_key: '{{ lookup(''file'', item.priv_key ) }}' @@ -119,7 +119,7 @@ original_cert: '{{ item.item }}' prev_task: '{{ item }}' - name: fetch data about cert just uploaded, by ARN - aws_acm_info: + acm_certificate_info: certificate_arn: '{{ item.certificate.arn }}' register: fetch_after_up with_items: '{{ upload.results }}' @@ -138,7 +138,7 @@ upload_result: '{{ item.item }}' original_cert: '{{ item.item.item }}' - name: fetch data about cert just uploaded, by name - aws_acm_info: + acm_certificate_info: tags: Name: '{{ original_cert.name }}' register: fetch_after_up_name @@ -161,7 +161,7 @@ upload_result: '{{ item.item }}' original_cert: '{{ item.item.item }}' - name: fetch data about cert just uploaded, by domain name - aws_acm_info: + acm_certificate_info: domain_name: '{{ original_cert.domain }}' register: fetch_after_up_domain with_items: '{{ upload.results }}' @@ -182,7 +182,7 @@ upload_result: '{{ item.item }}' original_cert: '{{ item.item.item }}' - name: upload certificates again, check not changed - aws_acm: + acm_certificate: name_tag: '{{ item.name }}' certificate: '{{ lookup(''file'', item.cert ) }}' private_key: '{{ lookup(''file'', item.priv_key ) }}' @@ -191,7 +191,7 @@ with_items: '{{ local_certs }}' failed_when: upload2.changed - name: update first cert with body of the second, first time, check mode - aws_acm: + acm_certificate: state: present name_tag: '{{ local_certs[0].name }}' certificate: '{{ lookup(''file'', local_certs[1].cert ) }}' @@ -203,7 +203,7 @@ that: - overwrite_check.changed - name: check previous tasks did not change real cert - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[0].name }}' register: fetch_after_overwrite_check @@ -217,7 +217,7 @@ - '''Name'' in fetch_after_overwrite_check.certificates[0].tags' - fetch_after_overwrite_check.certificates[0].tags['Name'] == local_certs[0].name - name: update first cert with body of the second, first real time - aws_acm: + acm_certificate: state: present name_tag: '{{ local_certs[0].name }}' certificate: '{{ lookup(''file'', local_certs[1].cert ) }}' @@ -232,7 +232,7 @@ - overwrite.certificate.domain_name == local_certs[1].domain - overwrite.changed - name: check update was successful - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[0].name }}' register: fetch_after_overwrite @@ -246,7 +246,7 @@ - '''Name'' in fetch_after_overwrite.certificates[0].tags' -
fetch_after_overwrite.certificates[0].tags['Name'] == local_certs[0].name - name: fetch other cert - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[1].name }}' register: check_after_overwrite @@ -260,7 +260,7 @@ - '''Name'' in check_after_overwrite.certificates[0].tags' - check_after_overwrite.certificates[0].tags['Name'] == local_certs[1].name - name: update first cert with body of the second again - aws_acm: + acm_certificate: state: present name_tag: '{{ local_certs[0].name }}' certificate: '{{ lookup(''file'', local_certs[1].cert ) }}' @@ -275,7 +275,7 @@ - overwrite2.certificate.domain_name == local_certs[1].domain - not overwrite2.changed - name: delete certs 1 and 2 in check mode - aws_acm: + acm_certificate: state: absent domain_name: '{{ local_certs[1].domain }}' check_mode: yes @@ -285,7 +285,7 @@ that: - delete_both_check.changed - name: fetch info for certs 1 and 2 - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[item].name }}' register: check_del_one_check @@ -298,7 +298,7 @@ that: - (item.certificates | length) == 1 - name: delete certs 1 and 2 real - aws_acm: + acm_certificate: state: absent domain_name: '{{ local_certs[1].domain }}' register: delete_both @@ -310,7 +310,7 @@ - upload.results[0].certificate.arn in delete_both.arns - delete_both.changed - name: fetch info for certs 1 and 2 - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[item].name }}' register: check_del_one @@ -327,7 +327,7 @@ assert: that: (item.certificates | length) == 0 - name: check cert 3 - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[2].name }}' register: check_del_one_remain @@ -336,7 +336,7 @@ that: - (check_del_one_remain.certificates | length) == 1 - name: delete cert 3 - aws_acm: + acm_certificate: state: absent domain_name: '{{ local_certs[2].domain }}' register: delete_third @@ -348,13 +348,13 @@ - delete_third.arns[0] == upload.results[2].certificate.arn - delete_third.changed - name: check cert 3 was deleted - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[2].name }}' register: check_del_three failed_when: check_del_three.certificates | length != 0 - name: delete cert 3 again - aws_acm: + acm_certificate: state: absent domain_name: '{{ local_certs[2].domain }}' register: delete_third @@ -365,7 +365,7 @@ - delete_third.arns | length == 0 - not delete_third.changed - name: delete cert 3 again, check mode - aws_acm: + acm_certificate: state: absent domain_name: '{{ local_certs[2].domain }}' check_mode: yes @@ -415,7 +415,7 @@ root_certificates: - '{{ local_certs[item.ca].cert }}' - name: upload chained cert, first chain, first time - aws_acm: + acm_certificate: name_tag: '{{ chained_cert.name }}' certificate: '{{ lookup(''file'', chained_cert.chains[0].cert ) }}' certificate_chain: '{{ chains.results[0].complete_chain | join('' @@ -426,7 +426,7 @@ register: upload_chain failed_when: not upload_chain.changed - name: fetch chain of cert we just uploaded - aws_acm_info: + acm_certificate_info: tags: Name: '{{ chained_cert.name }}' register: check_chain @@ -440,7 +440,7 @@ - (check_chain.certificates[0].certificate_chain | replace( ' ', '' ) | replace( '\n', '')) == ( chains.results[0].complete_chain | join( '\n' ) | replace( ' ', '' ) | replace( '\n', '') ) - (check_chain.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == ( lookup('file', chained_cert.chains[0].cert ) | replace( ' ', '' ) | replace( '\n', '') ) - name: upload chained cert again, check not changed - aws_acm: + 
acm_certificate: name_tag: '{{ chained_cert.name }}' certificate: '{{ lookup(''file'', chained_cert.chains[0].cert ) }}' certificate_chain: '{{ chains.results[0].complete_chain | join('' @@ -455,7 +455,7 @@ - upload_chain_2.certificate.arn == upload_chain.certificate.arn - not upload_chain_2.changed - name: upload chained cert, different chain - aws_acm: + acm_certificate: name_tag: '{{ chained_cert.name }}' certificate: '{{ lookup(''file'', chained_cert.chains[1].cert ) }}' certificate_chain: '{{ chains.results[1].complete_chain | join('' @@ -470,7 +470,7 @@ - upload_chain_3.changed - upload_chain_3.certificate.arn == upload_chain.certificate.arn - name: fetch info about chain of cert we just updated - aws_acm_info: + acm_certificate_info: tags: Name: '{{ chained_cert.name }}' register: check_chain_2 @@ -480,7 +480,7 @@ - (check_chain_2.certificates[0].certificate_chain | replace( ' ', '' ) | replace( '\n', '')) == ( chains.results[1].complete_chain | join( '\n' ) | replace( ' ', '' ) | replace( '\n', '') ) - (check_chain_2.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == ( lookup('file', chained_cert.chains[1].cert ) | replace( ' ', '' ) | replace( '\n', '') ) - name: delete chained cert - aws_acm: + acm_certificate: name_tag: '{{ chained_cert.name }}' state: absent register: delete_chain_3 @@ -491,13 +491,13 @@ - upload_chain.certificate.arn in delete_chain_3.arns always: - name: delete first bunch of certificates - aws_acm: + acm_certificate: name_tag: '{{ item.name }}' state: absent with_items: '{{ local_certs }}' ignore_errors: true - name: delete chained cert - aws_acm: + acm_certificate: state: absent name_tag: '{{ chained_cert.name }}' ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/acm_certificate/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/acm_certificate/tasks/main.yml index bf70587e6..5cc6d31a0 100644 --- a/ansible_collections/community/aws/tests/integration/targets/acm_certificate/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/acm_certificate/tasks/main.yml @@ -2,9 +2,9 @@ module_defaults: group/aws: aws_region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' block: # The CI runs many of these tests in parallel # Use this random ID to differentiate which resources @@ -12,7 +12,7 @@ - set_fact: aws_acm_test_uuid: "{{ (10**9) | random }}" - name: attempt to delete cert without specifying required parameter - aws_acm: + acm_certificate: state: absent register: result ignore_errors: true @@ -22,23 +22,23 @@ - 'result.failed' - '"If ''state'' is specified as ''absent'' then exactly one of ''name_tag''" in result.msg' - name: list certs - aws_acm_info: null + acm_certificate_info: null register: list_all failed_when: list_all.certificates is not defined - name: ensure absent cert which doesn't exist - first time - aws_acm: + acm_certificate: name_tag: '{{ item.name }}' state: absent with_items: '{{ local_certs }}' - name: ensure absent cert which doesn't exist - second time - aws_acm: + acm_certificate: name_tag: '{{ item.name }}' state: absent with_items: '{{ local_certs }}' register: absent_start_two failed_when: absent_start_two.changed - name: list cert which shouldn't exist - aws_acm_info: + 
acm_certificate_info: tags: Name: '{{ item.name }}' register: list_tag @@ -71,7 +71,7 @@ - name: try to upload certificate, but name_tag conflicts with tags.Name vars: local_cert: '{{ local_certs[0] }}' - aws_acm: + acm_certificate: name_tag: '{{ local_cert.name }}' certificate: '{{ lookup(''file'', local_cert.cert ) }}' private_key: '{{ lookup(''file'', local_cert.priv_key ) }}' @@ -88,7 +88,7 @@ - 'result.failed' - '"conflicts with value of" in result.msg' - name: upload certificates first time - aws_acm: + acm_certificate: name_tag: '{{ item.name }}' certificate: '{{ lookup(''file'', item.cert ) }}' private_key: '{{ lookup(''file'', item.priv_key ) }}' @@ -115,7 +115,7 @@ original_cert: '{{ item.item }}' prev_task: '{{ item }}' - name: fetch data about cert just uploaded, by ARN - aws_acm_info: + acm_certificate_info: certificate_arn: '{{ item.certificate.arn }}' register: fetch_after_up with_items: '{{ upload.results }}' @@ -138,7 +138,7 @@ upload_result: '{{ item.item }}' original_cert: '{{ item.item.item }}' - name: fetch data about cert just uploaded, by name - aws_acm_info: + acm_certificate_info: tags: Name: '{{ original_cert.name }}' register: fetch_after_up_name @@ -161,7 +161,7 @@ upload_result: '{{ item.item }}' original_cert: '{{ item.item.item }}' - name: fetch data about cert just uploaded, by domain name - aws_acm_info: + acm_certificate_info: domain_name: '{{ original_cert.domain }}' register: fetch_after_up_domain with_items: '{{ upload.results }}' @@ -182,7 +182,7 @@ upload_result: '{{ item.item }}' original_cert: '{{ item.item.item }}' - name: upload certificates again, check not changed - aws_acm: + acm_certificate: name_tag: '{{ item.name }}' certificate: '{{ lookup(''file'', item.cert ) }}' private_key: '{{ lookup(''file'', item.priv_key ) }}' @@ -191,7 +191,7 @@ with_items: '{{ local_certs }}' failed_when: upload2.changed - name: change tags of existing certificate, check mode - aws_acm: + acm_certificate: certificate_arn: '{{ certificate_arn }}' tags: Name: '{{ name_tag }}' @@ -208,7 +208,7 @@ that: - certificate_with_tags.changed - name: change tags of existing certificate, changes expected - aws_acm: + acm_certificate: # When applying tags to an existing certificate, it is sufficient to specify the 'certificate_arn'. # Previously, the 'aws_acm' module was requiring the 'certificate', 'name_tag' and 'domain_name' # attributes. 
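As the comment above notes, a tag-only update is keyed purely on the certificate ARN. Under the hood that is a pair of ACM tagging calls which never touch the certificate body or key; roughly (an illustrative boto3 sketch, with placeholder ARN and tag values):

import boto3

acm = boto3.client("acm", region_name="us-east-1")
arn = "arn:aws:acm:us-east-1:123456789012:certificate/example"  # placeholder ARN

# add or overwrite tags; the imported certificate itself is untouched
acm.add_tags_to_certificate(
    CertificateArn=arn,
    Tags=[{"Key": "Name", "Value": "my-cert"}, {"Key": "Environment", "Value": "staging"}],
)
# purge-style cleanup then removes any tags that were not requested
acm.remove_tags_from_certificate(CertificateArn=arn, Tags=[{"Key": "Owner"}])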
@@ -239,7 +239,7 @@ vars: name_tag: '{{ upload2.results[0].item.name }}' - name: change tags of existing certificate, check mode again - aws_acm: + acm_certificate: certificate_arn: '{{ certificate_arn }}' tags: Name: '{{ name_tag }}' @@ -255,7 +255,7 @@ that: - not certificate_with_tags.changed - name: change tags of existing certificate, no change expected - aws_acm: + acm_certificate: certificate_arn: '{{ certificate_arn }}' tags: Name: '{{ name_tag }}' @@ -299,7 +299,7 @@ - certificate_with_tags.certificate.tags['Environment'] == 'staging' - certificate_with_tags.certificate.tags['Owner'] == 'Bob' - name: change tags of existing certificate, purge tags - aws_acm: + acm_certificate: certificate_arn: '{{ certificate_arn }}' tags: Name: '{{ name_tag }}' @@ -328,7 +328,7 @@ - certificate_with_tags.certificate.tags['Application'] == 'search' - certificate_with_tags.certificate.tags['Environment'] == 'staging' - name: update first cert with body of the second, first time - aws_acm: + acm_certificate: state: present name_tag: '{{ local_certs[0].name }}' certificate: '{{ lookup(''file'', local_certs[1].cert ) }}' @@ -343,7 +343,7 @@ - overwrite.certificate.domain_name == local_certs[1].domain - overwrite.changed - name: check update was successful - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[0].name }}' register: fetch_after_overwrite @@ -357,7 +357,7 @@ - '''Name'' in fetch_after_overwrite.certificates[0].tags' - fetch_after_overwrite.certificates[0].tags['Name'] == local_certs[0].name - name: fetch other cert - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[1].name }}' register: check_after_overwrite @@ -371,7 +371,7 @@ - '''Name'' in check_after_overwrite.certificates[0].tags' - check_after_overwrite.certificates[0].tags['Name'] == local_certs[1].name - name: update first cert with body of the second again - aws_acm: + acm_certificate: state: present name_tag: '{{ local_certs[0].name }}' certificate: '{{ lookup(''file'', local_certs[1].cert ) }}' @@ -386,7 +386,7 @@ - overwrite2.certificate.domain_name == local_certs[1].domain - not overwrite2.changed - name: delete certs 1 and 2 - aws_acm: + acm_certificate: state: absent domain_name: '{{ local_certs[1].domain }}' register: delete_both @@ -398,7 +398,7 @@ - upload.results[0].certificate.arn in delete_both.arns - delete_both.changed - name: fetch info for certs 1 and 2 - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[item].name }}' register: check_del_one @@ -415,13 +415,13 @@ assert: that: item.certificates | length == 0 - name: check cert 3 not deleted - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[2].name }}' register: check_del_one_remain failed_when: check_del_one_remain.certificates | length != 1 - name: delete cert 3 - aws_acm: + acm_certificate: state: absent domain_name: '{{ local_certs[2].domain }}' register: delete_third @@ -433,13 +433,13 @@ - delete_third.arns[0] == upload.results[2].certificate.arn - delete_third.changed - name: check cert 3 was deleted - aws_acm_info: + acm_certificate_info: tags: Name: '{{ local_certs[2].name }}' register: check_del_three failed_when: check_del_three.certificates | length != 0 - name: delete cert 3 again - aws_acm: + acm_certificate: state: absent domain_name: '{{ local_certs[2].domain }}' register: delete_third @@ -490,7 +490,7 @@ root_certificates: - '{{ local_certs[item.ca].cert }}' - name: upload chained cert, first chain, first time - aws_acm: + acm_certificate: name_tag: '{{ chained_cert.name }}'
certificate: '{{ lookup(''file'', chained_cert.chains[0].cert ) }}' certificate_chain: '{{ chains.results[0].complete_chain | join('' @@ -501,7 +501,7 @@ register: upload_chain failed_when: not upload_chain.changed - name: fetch chain of cert we just uploaded - aws_acm_info: + acm_certificate_info: tags: Name: '{{ chained_cert.name }}' register: check_chain @@ -513,7 +513,7 @@ - (check_chain.certificates[0].certificate_chain | replace( ' ', '' ) | replace( '\n', '')) == ( chains.results[0].complete_chain | join( '\n' ) | replace( ' ', '' ) | replace( '\n', '') ) - (check_chain.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == ( lookup('file', chained_cert.chains[0].cert ) | replace( ' ', '' ) | replace( '\n', '') ) - name: upload chained cert again, check not changed - aws_acm: + acm_certificate: name_tag: '{{ chained_cert.name }}' certificate: '{{ lookup(''file'', chained_cert.chains[0].cert ) }}' certificate_chain: '{{ chains.results[0].complete_chain | join('' @@ -528,7 +528,7 @@ - upload_chain_2.certificate.arn == upload_chain.certificate.arn - not upload_chain_2.changed - name: upload chained cert, different chain - aws_acm: + acm_certificate: name_tag: '{{ chained_cert.name }}' certificate: '{{ lookup(''file'', chained_cert.chains[1].cert ) }}' certificate_chain: '{{ chains.results[1].complete_chain | join('' @@ -543,7 +543,7 @@ - upload_chain_3.changed - upload_chain_3.certificate.arn == upload_chain.certificate.arn - name: fetch info about chain of cert we just updated - aws_acm_info: + acm_certificate_info: tags: Name: '{{ chained_cert.name }}' register: check_chain_2 @@ -555,7 +555,7 @@ - (check_chain_2.certificates[0].certificate_chain | replace( ' ', '' ) | replace( '\n', '')) == ( chains.results[1].complete_chain | join( '\n' ) | replace( ' ', '' ) | replace( '\n', '') ) - (check_chain_2.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == ( lookup('file', chained_cert.chains[1].cert ) | replace( ' ', '' ) | replace( '\n', '') ) - name: delete chained cert - aws_acm: + acm_certificate: name_tag: '{{ chained_cert.name }}' state: absent register: delete_chain_3 @@ -566,13 +566,13 @@ - upload_chain.certificate.arn in delete_chain_3.arns always: - name: delete first bunch of certificates - aws_acm: + acm_certificate: name_tag: '{{ item.name }}' state: absent with_items: '{{ local_certs }}' ignore_errors: true - name: delete chained cert - aws_acm: + acm_certificate: state: absent name_tag: '{{ chained_cert.name }}' ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/api_gateway/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/api_gateway/defaults/main.yml new file mode 100644 index 000000000..aca496660 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/api_gateway/defaults/main.yml @@ -0,0 +1,9 @@ +--- +api_names: + - "ansible-api-{{ resource_prefix }}-1" + - "ansible-api-{{ resource_prefix }}-2" +resource_tags: + - gateway_name: "ansible-api-{{ resource_prefix }}" + ansible_test: "{{ resource_prefix }}-1" + - gateway_name: "ansible-api-{{ resource_prefix }}" + ansible_test: "{{ resource_prefix }}-2" diff --git a/ansible_collections/community/aws/tests/integration/targets/api_gateway/tasks/lookup.yml b/ansible_collections/community/aws/tests/integration/targets/api_gateway/tasks/lookup.yml new file mode 100644 index 000000000..8e0965439 --- /dev/null +++ 
b/ansible_collections/community/aws/tests/integration/targets/api_gateway/tasks/lookup.yml @@ -0,0 +1,211 @@ +--- +- name: Test API gateway creation using lookup=tag + vars: + api_name: "{{ api_names[0] }}" + block: + - name: Define API gateway configuration + set_fact: + apigateway_swagger_text: "{{ lookup('template', 'minimal-swagger-api.yml.j2') }}" + + # Test: create API gateway using check_mode = true + - name: Create API gateway (check_mode=true) + community.aws.api_gateway: + name: "{{ api_name }}" + swagger_text: "{{ apigateway_swagger_text }}" + check_mode: true + register: __create_check_mode + + - name: List existing API gateway + community.aws.api_gateway_info: + register: gateways + + - name: Ensure using check_mode=true, no API gateway was created + assert: + that: + - __create_check_mode is changed + - gateways.rest_apis | selectattr('name', 'equalto', api_name) | list | length == 0 + + # Test: create new API gateway using name and tags + - name: Create new API gateway + community.aws.api_gateway: + name: "{{ api_name }}" + swagger_text: "{{ apigateway_swagger_text }}" + lookup: tag + tags: "{{ resource_tags[0] }}" + register: __create + + - name: List existing API gateway + community.aws.api_gateway_info: + register: gateways + + - name: Ensure new API was created + assert: + that: + - __create is changed + - gateways.rest_apis | selectattr('name', 'equalto', api_name) | list | length == 1 + + # Test: create API gateway idempotency (task reported changed but no new API created) + - name: Create same API gateway once again + community.aws.api_gateway: + name: "{{ api_name }}" + swagger_text: "{{ apigateway_swagger_text }}" + lookup: tag + tags: "{{ resource_tags[0] }}" + + - name: List existing API gateway + community.aws.api_gateway_info: + register: gateways + + - name: Ensure no new API was created + assert: + that: + - gateways.rest_apis | selectattr('name', 'equalto', api_name) | list | length == 1 + + # Test: create new API using existing name but different tags (new API gateway should be created) + - name: Create another API gateway with the same name but different tags + community.aws.api_gateway: + name: "{{ api_name }}" + swagger_text: "{{ apigateway_swagger_text }}" + lookup: tag + tags: "{{ resource_tags[1] }}" + + - name: List existing API gateway + community.aws.api_gateway_info: + register: gateways + + - name: Ensure new API was created + assert: + that: + - gateways.rest_apis | selectattr('name', 'equalto', api_name) | list | length == 2 + + rescue: + - name: List existing API gateway + community.aws.api_gateway_info: + register: gateways + + - name: Delete remaining API gateway + community.aws.api_gateway: + api_id: '{{ item }}' + state: absent + ignore_errors: true + with_items: "{{ gateways.rest_apis | selectattr('name', 'equalto', api_name) | map(attribute='id') | list }}" + +- name: Test API gateway deletion + block: + - name: "Create new API gateway name={{ api_name }}" + community.aws.api_gateway: + name: "{{ api_name }}" + swagger_text: "{{ lookup('template', 'minimal-swagger-api.yml.j2') }}" + lookup: tag + tags: "{{ resource_tags[0] }}" + vars: + api_name: "{{ api_names[1] }}" + + - name: List existing API gateway + community.aws.api_gateway_info: + register: gateways + + - name: Ensure new API was created + assert: + that: + - gateways.rest_apis | selectattr('name', 'equalto', api_names[1]) | list | length == 1 + - gateways.rest_apis | selectattr('name', 'equalto', api_names[0]) | list | length == 2 + + # Test: Delete with lookup=tag (conflict), 
should fail + - name: Delete API gateway + community.aws.api_gateway: + lookup: tag + tags: "{{ resource_tags[0] }}" + state: absent + register: __delete_conflict + ignore_errors: true + + - name: Ensure task failed + assert: + that: + - __delete_conflict is failed + - '__delete_conflict.msg == "Tags provided do not identify a unique API gateway"' + + # Test: Delete with name only (no api_id) + - name: Create same API gateway once again + community.aws.api_gateway: + name: "{{ api_names[1] }}" + state: absent + register: __delete_missing_params + ignore_errors: true + + - name: Ensure task failed + assert: + that: + - __delete_missing_params is failed + - '__delete_missing_params.msg == "API gateway id must be supplied to delete API gateway or provided tag with lookup=tag to identify API gateway id."' + + # Test: Delete (check_mode) + - name: Delete API gateway - check mode + community.aws.api_gateway: + name: "{{ api_names[1] }}" + lookup: tag + tags: "{{ resource_tags[0] }}" + state: absent + register: __delete_check_mode + check_mode: true + + - name: List existing API gateway + community.aws.api_gateway_info: + register: gateways + + - name: Ensure running in check mode, API was not deleted. + assert: + that: + - __delete_check_mode is changed + - gateways.rest_apis | selectattr('name', 'equalto', api_names[1]) | list | length == 1 + - gateways.rest_apis | selectattr('name', 'equalto', api_names[0]) | list | length == 2 + + # Test: Delete using name and API gateway + - name: Delete API gateway using name and lookup=tag + community.aws.api_gateway: + name: "{{ api_names[1] }}" + lookup: tag + tags: "{{ resource_tags[0] }}" + state: absent + register: __delete + + - name: List existing API gateway + community.aws.api_gateway_info: + register: gateways + + - name: Ensure matching API gateway was deleted + assert: + that: + - __delete is changed + - gateways.rest_apis | selectattr('name', 'equalto', api_names[1]) | list | length == 0 + - gateways.rest_apis | selectattr('name', 'equalto', api_names[0]) | list | length == 2 + + # Test: Delete using api_id + - name: Delete API gateway using api_id + community.aws.api_gateway: + api_id: "{{ gateways.rest_apis | selectattr('name', 'equalto', api_names[0]) | map(attribute='id') | first }}" + state: absent + register: __delete + + - name: List existing API gateway + community.aws.api_gateway_info: + register: gateways + + - name: Ensure matching API gateway was deleted + assert: + that: + - __delete is changed + - gateways.rest_apis | selectattr('name', 'equalto', api_names[0]) | list | length == 1 + + always: + - name: List existing API gateway + community.aws.api_gateway_info: + register: gateways + + - name: Delete remaining API gateway + community.aws.api_gateway: + api_id: '{{ item }}' + state: absent + ignore_errors: true + with_items: "{{ gateways.rest_apis | selectattr('name', 'in', api_names) | map(attribute='id') | list }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/api_gateway/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/api_gateway/tasks/main.yml index 51db07f0d..2e00128cd 100644 --- a/ansible_collections/community/aws/tests/integration/targets/api_gateway/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/api_gateway/tasks/main.yml @@ -1,9 +1,9 @@ - name: Wrap API Gateway tests with credentials by default module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{
security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: @@ -11,7 +11,7 @@ # ====================== testing failure cases: ================================== - name: test with no parameters - aws_api_gateway: + api_gateway: register: result ignore_errors: true @@ -22,7 +22,7 @@ - '"no swagger info provided" in result.msg' - name: test for disallowing multiple swagger sources - aws_api_gateway: + api_gateway: api_id: 'fake-api-doesnt-exist' swagger_file: foo.yml swagger_text: "this is not really an API" @@ -42,9 +42,11 @@ template: src: minimal-swagger-api.yml.j2 dest: "{{output_dir}}/minimal-swagger-api.yml" + vars: + api_name: "{{ resource_prefix }}-minimal" - name: deploy new API - aws_api_gateway: + api_gateway: api_file: "{{output_dir}}/minimal-swagger-api.yml" stage: "minimal" endpoint_type: 'REGIONAL' @@ -58,11 +60,14 @@ - 'create_result.failed == False' - 'create_result.deploy_response.description == "Automatic deployment by Ansible."' - 'create_result.configure_response.id == create_result.api_id' - - '"apigateway:CreateRestApi" in create_result.resource_actions' - 'create_result.configure_response.endpoint_configuration.types.0 == "REGIONAL"' - name: check if API endpoint works - uri: url="https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/minimal" + uri: + url: "https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/minimal" + retries: 10 + delay: 5 + until: uri_result is successful register: uri_result - name: assert API works success @@ -71,7 +76,8 @@ - 'uri_result.status == 200' - name: check if nonexistent endpoint causes error - uri: url="https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/nominal" + uri: + url: "https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/nominal" register: bad_uri_result ignore_errors: true @@ -81,7 +87,7 @@ - bad_uri_result is failed - name: Update API to test params effect - aws_api_gateway: + api_gateway: api_id: '{{create_result.api_id}}' api_file: "{{output_dir}}/minimal-swagger-api.yml" cache_enabled: true @@ -93,14 +99,12 @@ - name: assert update result assert: that: - - 'update_result.changed == True' - - 'update_result.failed == False' - - '"apigateway:PutRestApi" in update_result.resource_actions' + - update_result is changed # ==== additional create/delete tests ==== - name: deploy first API - aws_api_gateway: + api_gateway: api_file: "{{output_dir}}/minimal-swagger-api.yml" stage: "minimal" cache_enabled: false @@ -108,7 +112,7 @@ register: create_result_1 - name: deploy second API rapidly after first - aws_api_gateway: + api_gateway: api_file: "{{output_dir}}/minimal-swagger-api.yml" stage: "minimal" state: present @@ -124,13 +128,13 @@ - 'create_result_1.configure_response.endpoint_configuration.types.0 == "EDGE"' - name: destroy first API - aws_api_gateway: + api_gateway: state: absent api_id: '{{create_result_1.api_id}}' register: destroy_result_1 - name: destroy second API rapidly after first - aws_api_gateway: + api_gateway: state: absent api_id: '{{create_result_2.api_id}}' register: destroy_result_2 @@ -138,29 +142,33 @@ - name: assert both APIs deployed successfully assert: that: - - 'destroy_result_1.changed == True' - - 'destroy_result_2.changed == True' - - '"apigateway:DeleteRestApi" in destroy_result_1.resource_actions' - - '"apigateway:DeleteRestApi" in destroy_result_2.resource_actions' + - 
destroy_result_1 is changed + - destroy_result_2 is changed + + # ==== test create/delete using lookup=tag ==== + - include_tasks: lookup.yml + + # ==== Tagging ==== + - include_tasks: tagging.yml # ================= end testing ==================================== always: - name: Ensure cleanup of API deploy - aws_api_gateway: + api_gateway: state: absent api_id: '{{create_result.api_id}}' ignore_errors: true - name: Ensure cleanup of API deploy 1 - aws_api_gateway: + api_gateway: state: absent api_id: '{{create_result_1.api_id}}' ignore_errors: true - name: Ensure cleanup of API deploy 2 - aws_api_gateway: + api_gateway: state: absent api_id: '{{create_result_2.api_id}}' ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/api_gateway/tasks/tagging.yml b/ansible_collections/community/aws/tests/integration/targets/api_gateway/tasks/tagging.yml new file mode 100644 index 000000000..b72035083 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/api_gateway/tasks/tagging.yml @@ -0,0 +1,91 @@ +--- +- name: Test API gateway tagging + vars: + api_name: "api-{{ resource_prefix }}-tagging" + apigateway_tags: + resource_prefix: "{{ resource_prefix }}" + collection: community.aws + new_tag: + resource_type: REST + block: + - name: Define API gateway configuration + set_fact: + apigateway_swagger_text: "{{ lookup('template', 'minimal-swagger-api.yml.j2') }}" + + - name: Create API gateway + community.aws.api_gateway: + swagger_text: "{{ apigateway_swagger_text }}" + tags: "{{ apigateway_tags }}" + register: __api_gateway_create + + - name: Assert resource was created with expected tags + assert: + that: + - __api_gateway_create.configure_response.tags == apigateway_tags + + - name: Define API gateway id + ansible.builtin.set_fact: + apigateway_id: "{{ __api_gateway_create.api_id }}" + + # Update tags purge_tags=false and check_mode + - name: Update tags using check_mode + community.aws.api_gateway: + api_id: "{{ apigateway_id }}" + tags: "{{ apigateway_tags | combine(new_tag) }}" + purge_tags: false + check_mode: true + + - name: Get API Gateway + community.aws.api_gateway_info: + ids: + - "{{ apigateway_id }}" + register: __api_gateway_info + + - name: Ensure tags were not changed + assert: + that: + - __api_gateway_info.rest_apis.0.tags == apigateway_tags + + # Update tags purge_tags=false + - name: Update tags + community.aws.api_gateway: + api_id: "{{ apigateway_id }}" + tags: "{{ apigateway_tags | combine(new_tag) }}" + purge_tags: false + + - name: Get API Gateway + community.aws.api_gateway_info: + ids: + - "{{ apigateway_id }}" + register: __api_gateway_info + + - name: Ensure new tag was added and existing tags were kept + assert: + that: + - __api_gateway_info.rest_apis.0.tags == apigateway_tags | combine(new_tag) + + # Update tags purge_tags=true + - name: Update tags + community.aws.api_gateway: + api_id: "{{ apigateway_id }}" + tags: "{{ new_tag }}" + register: __update_api_gateway + + - name: Get API Gateway + community.aws.api_gateway_info: + ids: + - "{{ apigateway_id }}" + register: __api_gateway_info + + - name: Ensure tags were replaced + assert: + that: + - __update_api_gateway is changed + - __api_gateway_info.rest_apis.0.tags == new_tag + + always: + - name: Delete API Gateway + community.aws.api_gateway: + api_id: "{{ apigateway_id }}" + state: absent + ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/api_gateway/templates/minimal-swagger-api.yml.j2 
b/ansible_collections/community/aws/tests/integration/targets/api_gateway/templates/minimal-swagger-api.yml.j2 index 8c5c05810..d1d4c7ff6 100644 --- a/ansible_collections/community/aws/tests/integration/targets/api_gateway/templates/minimal-swagger-api.yml.j2 +++ b/ansible_collections/community/aws/tests/integration/targets/api_gateway/templates/minimal-swagger-api.yml.j2 @@ -2,7 +2,7 @@ swagger: "2.0" info: version: "2017-05-11T12:14:59Z" - title: "{{resource_prefix}}Empty_API" + title: "{{ api_name }}" host: "fakeexample.execute-api.us-east-1.amazonaws.com" basePath: "/minimal" schemes: diff --git a/ansible_collections/community/aws/tests/integration/targets/api_gateway_domain/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/api_gateway_domain/tasks/main.yml index 76de2657e..f3c740793 100644 --- a/ansible_collections/community/aws/tests/integration/targets/api_gateway_domain/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/api_gateway_domain/tasks/main.yml @@ -4,9 +4,9 @@ - name: Run aws_api_gateway_domain module integration tests module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" # NOTE: To make tests work set TLS ARN in defaults/main.yml to an existing and @@ -17,7 +17,7 @@ # ==================== preparations ======================================== - name: Preparations - Create REST API Gateway on AWS API Gateway service to reference from domain tests - aws_api_gateway: + api_gateway: swagger_file: files/api_gw_swagger.yml stage: test state: present @@ -26,7 +26,7 @@ # ================== integration tests ========================================== - name: Create Test - API gateway custom domain setup - aws_api_gateway_domain: + api_gateway_domain: domain_name: "{{ api_gateway_domain_name }}" certificate_arn: "{{ api_gateway_domain_tls_arn }}" security_policy: 'TLS_1_0' @@ -39,13 +39,13 @@ - assert: that: - create_result.changed == True - - create_result.response.domain.domain_name == "{{ api_gateway_domain_name }}" + - create_result.response.domain.domain_name == api_gateway_domain_name - create_result.response.domain.distribution_domain_name is defined - create_result.response.domain.distribution_hosted_zone_id is defined - create_result.response.path_mappings is defined - name: Idempotence Test - API gateway custom domain setup - aws_api_gateway_domain: + api_gateway_domain: domain_name: "{{ api_gateway_domain_name }}" certificate_arn: "{{ api_gateway_domain_tls_arn }}" security_policy: 'TLS_1_0' @@ -59,10 +59,10 @@ that: - repeat_result.changed == False - repeat_result.failed == False - - repeat_result.response.domain_name == "{{ api_gateway_domain_name }}" + - repeat_result.response.domain_name == api_gateway_domain_name - name: Update Test - API gateway custom domain setup, change settings - aws_api_gateway_domain: + api_gateway_domain: domain_name: "{{ api_gateway_domain_name }}" certificate_arn: "{{ api_gateway_domain_tls_arn }}" security_policy: 'TLS_1_2' @@ -75,13 +75,13 @@ - assert: that: - update_result.changed == True - - update_result.response.domain.domain_name == "{{ api_gateway_domain_name }}" + - update_result.response.domain.domain_name == api_gateway_domain_name - update_result.response.domain.security_policy == 'TLS_1_2' - 
update_result.response.domain.endpoint_configuration.types.0 == 'REGIONAL' - update_result.response.path_mappings.0.base_path == '/v1' - name: Delete - API gateway custom domain setup deletion - aws_api_gateway_domain: + api_gateway_domain: domain_name: "{{ api_gateway_domain_name }}" certificate_arn: "{{ api_gateway_domain_tls_arn }}" security_policy: 'TLS_1_2' @@ -101,7 +101,7 @@ always: - name: Cleanup - delete test domain setup - aws_api_gateway_domain: + api_gateway_domain: domain_name: "{{ api_gateway_domain_name }}" certificate_arn: "{{ api_gateway_domain_tls_arn }}" domain_mappings: [] @@ -109,7 +109,7 @@ ignore_errors: true - name: Cleanup - remove REST API Gateway on AWS API Gateway service - aws_api_gateway: + api_gateway: api_id: "{{ api_gateway_result.api_id }}" swagger_file: files/api_gw_swagger.yml state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/env_cleanup.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/env_cleanup.yml index 75d1ecfad..ef894ff54 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/env_cleanup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/env_cleanup.yml @@ -1,5 +1,5 @@ - name: kill asg - ec2_asg: + autoscaling_group: name: "{{ asg_name }}" state: absent register: removed @@ -8,7 +8,7 @@ retries: 10 - name: remove launch configs - ec2_lc: + autoscaling_launch_config: name: "{{ lc_name }}" state: absent register: removed @@ -17,7 +17,7 @@ retries: 10 - name: remove the security group - ec2_group: + ec2_security_group: name: "{{ sg_name }}" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/env_setup.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/env_setup.yml index ae958cd89..b4609ea97 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/env_setup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/env_setup.yml @@ -37,7 +37,7 @@ - "{{ testing_subnet.subnet.id }}" - name: create a security group with the vpc created in the ec2_setup - ec2_group: + ec2_security_group: name: "{{ sg_name }}" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" @@ -53,7 +53,7 @@ register: sg - name: create a launch configuration - ec2_lc: + autoscaling_launch_config: name: "{{ lc_name }}" image_id: "{{ ec2_ami_id }}" instance_type: t2.micro @@ -67,7 +67,7 @@ - create_lc.failed is false - name: create an AutoScalingGroup - ec2_asg: + autoscaling_group: name: "{{ asg_name }}" launch_config_name: "{{ lc_name }}" health_check_period: 60 diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/main.yml index d8380d913..d4b2a7c7a 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/main.yml @@ -2,11 
+2,12 @@ - name: "Wrap up all tests and setup AWS credentials" module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" collections: + - amazon.aws - community.aws block: - include_tasks: 'env_setup.yml' diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/tests.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/tests.yml index 7d326c6ff..804f802bb 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/tests.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_complete_lifecycle_action/tasks/tests.yml @@ -3,7 +3,7 @@ block: #---------------------------------------------------------------------- - name: Create lifecycle hook - ec2_asg_lifecycle_hook: + autoscaling_lifecycle_hook: autoscaling_group_name: "{{ asg_name }}" lifecycle_hook_name: "{{ resource_prefix }}-lifecycle-hook" transition: autoscaling:EC2_INSTANCE_LAUNCHING @@ -18,7 +18,7 @@ - output is not failed - name: Create lifecycle hook - ec2_asg_lifecycle_hook: + autoscaling_lifecycle_hook: autoscaling_group_name: "{{ asg_name }}" lifecycle_hook_name: "{{ resource_prefix }}-lifecycle-hook-terminate" transition: autoscaling:EC2_INSTANCE_TERMINATING @@ -33,7 +33,7 @@ - output is not failed - name: Trigger scale-up - ec2_asg: + autoscaling_group: name: "{{ asg_name }}" replace_all_instances: yes min_size: 0 @@ -47,7 +47,7 @@ - scale_asg is changed - name: Describe ASG - ec2_asg_info: + autoscaling_group_info: name: "{{ asg_name }}" register: scaled_asg retries: 24 @@ -62,7 +62,7 @@ instance_ids: '{{ scaled_asg.results[0].instances | map(attribute="instance_id") | list }}' - name: Describe ASG - ec2_asg_info: + autoscaling_group_info: name: "{{ asg_name }}" - name: Complete Lifecycle Hook @@ -80,7 +80,7 @@ instance_id: '{{ instance_ids[1] }}' - name: Describe ASG - ec2_asg_info: + autoscaling_group_info: name: "{{ asg_name }}" register: hooks_pending retries: 24 @@ -104,7 +104,7 @@ always: - name: Delete lifecycle hook - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: autoscaling_group_name: "{{ asg_name }}" lifecycle_hook_name: "{{ resource_prefix }}-lifecycle-hook" state: absent @@ -112,7 +112,7 @@ ignore_errors: True - name: Delete lifecycle hook - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: autoscaling_group_name: "{{ asg_name }}" lifecycle_hook_name: "{{ resource_prefix }}-lifecycle-hook-terminate" state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_instance_refresh/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_instance_refresh/tasks/main.yml index 32cfd5378..5b754d47d 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_instance_refresh/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_instance_refresh/tasks/main.yml @@ -2,9 +2,9 @@ - name: setup credentials and region module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | 
default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" collections: @@ -47,7 +47,7 @@ - "{{ testing_subnet.subnet.id }}" - name: create a security group with the vpc created in the ec2_setup - ec2_group: + ec2_security_group: name: "{{ sg_name }}" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" @@ -63,7 +63,7 @@ register: sg - name: ensure launch configs exist - ec2_lc: + autoscaling_launch_config: name: "{{ item }}" assign_public_ip: true image_id: "{{ ec2_ami_id }}" @@ -81,7 +81,7 @@ - "{{ lc_name_2 }}" - name: launch asg and do not wait for instances to be deemed healthy (no ELB) - ec2_asg: + autoscaling_group: name: "{{ asg_name }}" launch_config_name: "{{ lc_name_1 }}" desired_capacity: 1 @@ -99,7 +99,7 @@ # ============================================================ - name: test invalid cancelation - V1 - (pre-refresh) - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "cancelled" ignore_errors: yes @@ -107,10 +107,10 @@ - assert: that: - - "'An error occurred (ActiveInstanceRefreshNotFound) when calling the CancelInstanceRefresh operation: No in progress or pending Instance Refresh found for Auto Scaling group {{ resource_prefix }}-asg' in result.msg" + - "'An error occurred (ActiveInstanceRefreshNotFound) when calling the CancelInstanceRefresh operation: No in progress or pending Instance Refresh found for Auto Scaling group ' ~ resource_prefix ~ '-asg' in result.msg" - name: test starting a refresh with a valid ASG name - check_mode - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "started" check_mode: true @@ -123,7 +123,7 @@ - '"autoscaling:StartInstanceRefresh" not in output.resource_actions' - name: test starting a refresh with a valid ASG name - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "started" register: output @@ -133,7 +133,7 @@ - "'instance_refresh_id' in output.instance_refreshes" - name: test starting a refresh with a valid ASG name - Idempotent - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "started" ignore_errors: true @@ -145,7 +145,7 @@ - '"Failed to start InstanceRefresh: An error occurred (InstanceRefreshInProgress) when calling the StartInstanceRefresh operation: An Instance Refresh is already in progress and blocks the execution of this Instance Refresh." in output.msg' - name: test starting a refresh with a valid ASG name - Idempotent (check_mode) - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "started" ignore_errors: true @@ -159,7 +159,7 @@ - '"In check_mode - Instance Refresh is already in progress, can not start new instance refresh." 
in output.msg' - name: test starting a refresh with a nonexistent ASG name - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "nonexistentname-asg" state: "started" ignore_errors: yes @@ -170,7 +170,7 @@ - "'Failed to start InstanceRefresh: An error occurred (ValidationError) when calling the StartInstanceRefresh operation: AutoScalingGroup name not found' in result.msg" - name: test canceling a refresh with an ASG name - check_mode - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "cancelled" check_mode: true @@ -183,7 +183,7 @@ - '"autoscaling:CancelInstanceRefresh" not in output.resource_actions' - name: test canceling a refresh with an ASG name - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "cancelled" register: output @@ -193,7 +193,7 @@ - "'instance_refresh_id' in output.instance_refreshes" - name: test canceling a refresh with an ASG name - Idempotent - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "cancelled" ignore_errors: yes @@ -204,7 +204,7 @@ - output is not changed - name: test cancelling a refresh with a valid ASG name - Idempotent (check_mode) - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "cancelled" ignore_errors: true @@ -217,7 +217,7 @@ - output is not failed - name: test starting a refresh with an ASG name and preferences dict - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "started" preferences: @@ -232,7 +232,7 @@ - "'instance_refresh_id' in output.instance_refreshes" - name: re-test canceling a refresh with an ASG name - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "cancelled" register: output @@ -242,7 +242,7 @@ - "'instance_refresh_id' in output.instance_refreshes" - name: test valid start - V1 - (with preferences missing instance_warmup) - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "started" preferences: @@ -257,7 +257,7 @@ - "'instance_refresh_id' in output.instance_refreshes" - name: re-test canceling a refresh with an ASG name - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "cancelled" register: output @@ -267,7 +267,7 @@ - "'instance_refresh_id' in output.instance_refreshes" - name: test valid start - V2 - (with preferences missing min_healthy_percentage) - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "started" preferences: @@ -282,7 +282,7 @@ - "'instance_refresh_id' in output.instance_refreshes" - name: test invalid cancelation - V2 - (with preferences) - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "cancelled" preferences: @@ -302,7 +302,7 @@ loop: "{{ query('sequence', 'start=1 end=3') }}" - name: test getting info for an ASG name - ec2_asg_instance_refresh_info: + autoscaling_instance_refresh_info: name: "{{ asg_name }}" region: "{{ aws_region }}" ignore_errors: yes @@ -315,7 +315,7 @@ inst_refresh_id_json_query: instance_refreshes[].instance_refresh_id - name: test using fake refresh ID - ec2_asg_instance_refresh_info: + autoscaling_instance_refresh_info: name: "{{ asg_name }}" ids: ['0e367f58-blabla-bla-bla-ca870dc5dbfe'] ignore_errors: yes @@ -323,10 +323,10 @@ - assert: that: - - "{{ output.instance_refreshes|length }} == 0" + - output.instance_refreshes | length == 0 - name: test using a real refresh ID - 
ec2_asg_instance_refresh_info: + autoscaling_instance_refresh_info: name: "{{ asg_name }}" ids: [ '{{ refreshout.instance_refreshes.instance_refresh_id }}' ] ignore_errors: yes @@ -334,10 +334,10 @@ - assert: that: - - "{{ output.instance_refreshes |length }} == 1" + - output.instance_refreshes | length == 1 - name: test getting info for an ASG name which doesn't exist - ec2_asg_instance_refresh_info: + autoscaling_instance_refresh_info: name: n0n3x1stentname27b ignore_errors: yes register: output @@ -347,17 +347,17 @@ - "'Failed to describe InstanceRefreshes: An error occurred (ValidationError) when calling the DescribeInstanceRefreshes operation: AutoScalingGroup name not found - AutoScalingGroup n0n3x1stentname27b not found' == output.msg" - name: assert that the correct number of records are returned - ec2_asg_instance_refresh_info: + autoscaling_instance_refresh_info: name: "{{ asg_name }}" ignore_errors: yes register: output - assert: that: - - "{{ output.instance_refreshes|length }} == 7" + - output.instance_refreshes | length == 7 - name: assert that valid message with fake-token is returned - ec2_asg_instance_refresh_info: + autoscaling_instance_refresh_info: name: "{{ asg_name }}" next_token: "fake-token-123" ignore_errors: yes @@ -368,7 +368,7 @@ - '"Failed to describe InstanceRefreshes: An error occurred (InvalidNextToken) when calling the DescribeInstanceRefreshes operation: The token ''********'' is invalid." == output.msg' - name: assert that max records=1 returns no more than one record - ec2_asg_instance_refresh_info: + autoscaling_instance_refresh_info: name: "{{ asg_name }}" max_records: 1 ignore_errors: yes @@ -376,10 +376,10 @@ - assert: that: - - "{{ output.instance_refreshes|length }} < 2" + - output.instance_refreshes | length < 2 - name: assert that valid message with real-token is returned - ec2_asg_instance_refresh_info: + autoscaling_instance_refresh_info: name: "{{ asg_name }}" next_token: "{{ output.next_token }}" ignore_errors: yes @@ -387,10 +387,10 @@ - assert: that: - - "{{ output.instance_refreshes|length }} == 7" + - output.instance_refreshes | length == 7 - name: test using both real nextToken and max_records=1 - ec2_asg_instance_refresh_info: + autoscaling_instance_refresh_info: name: "{{ asg_name }}" max_records: 1 next_token: "{{ output.next_token }}" @@ -399,12 +399,12 @@ - assert: that: - - "{{ output.instance_refreshes|length }} == 1" + - output.instance_refreshes | length == 1 always: - name: kill asg - ec2_asg: + autoscaling_group: name: "{{ asg_name }}" state: absent register: removed @@ -414,7 +414,7 @@ # Remove the testing dependencies - name: remove the load balancer - ec2_elb_lb: + elb_classic_lb: name: "{{ load_balancer_name }}" state: absent security_group_ids: @@ -440,7 +440,7 @@ retries: 10 - name: remove launch configs - ec2_lc: + autoscaling_launch_config: name: "{{ item }}" state: absent register: removed @@ -461,7 +461,7 @@ ignore_errors: true - name: remove the security group - ec2_group: + ec2_security_group: name: "{{ sg_name }}" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_instance_refresh/tasks/refresh_and_cancel_three_times.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_instance_refresh/tasks/refresh_and_cancel_three_times.yml index 15fa2100c..9b051a054 100644 --- 
a/ansible_collections/community/aws/tests/integration/targets/autoscaling_instance_refresh/tasks/refresh_and_cancel_three_times.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_instance_refresh/tasks/refresh_and_cancel_three_times.yml @@ -1,17 +1,17 @@ --- - name: try to cancel pre-loop - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "cancelled" ignore_errors: yes - name: test starting a refresh with an ASG name - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "started" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" region: "{{ aws_region }}" ignore_errors: no retries: 10 @@ -20,10 +20,10 @@ until: refreshout is not failed - name: test cancelling a refresh with an ASG name - ec2_asg_instance_refresh: + autoscaling_instance_refresh: name: "{{ asg_name }}" state: "cancelled" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" region: "{{ aws_region }}" ignore_errors: yes diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/env_cleanup.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/env_cleanup.yml index 9e5ae6a93..ce626b69c 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/env_cleanup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/env_cleanup.yml @@ -24,7 +24,7 @@ retries: 10 - name: remove the security group - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/env_setup.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/env_setup.yml index 88f5bb6fe..d48bae66c 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/env_setup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/env_setup.yml @@ -48,7 +48,7 @@ - "{{ testing_subnet_b.subnet.id }}" - name: create a security group with the vpc - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/main.yml index 6606484b1..da1f2fb1f 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_launch_config/tasks/main.yml @@ -1,9 +1,9 @@ - name: run ec2_lc tests module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" collections: - amazon.aws @@ -14,7 +14,7 @@ include_tasks: 
env_setup.yml - name: Create launch configuration 1 - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc1' image_id: '{{ ec2_ami_id }}' assign_public_ip: yes @@ -28,7 +28,7 @@ register: lc_1_create - name: Gather information about launch configuration 1 - community.aws.ec2_lc_info: + community.aws.autoscaling_launch_config_info: name: '{{ resource_prefix }}-lc1' register: lc_1_info_result @@ -42,7 +42,7 @@ - lc_1_info_result.launch_configurations[0].instance_type == 't2.micro' - name: Create launch configuration 1 - Idempotency - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc1' image_id: '{{ ec2_ami_id }}' assign_public_ip: yes @@ -61,7 +61,7 @@ - '"autoscaling:CreateLaunchConfiguration" not in lc_1_create_idem.resource_actions' - name: Create launch configuration 2 - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc2' image_id: '{{ ec2_ami_id }}' assign_public_ip: yes @@ -75,7 +75,7 @@ register: lc_2_create - name: Gather information about launch configuration 2 - community.aws.ec2_lc_info: + community.aws.autoscaling_launch_config_info: name: '{{ resource_prefix }}-lc2' register: lc_2_info_result @@ -90,7 +90,7 @@ - '"autoscaling:CreateLaunchConfiguration" in lc_2_create.resource_actions' - name: Create launch configuration 2 - Idempotency - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc2' image_id: '{{ ec2_ami_id }}' assign_public_ip: yes @@ -109,7 +109,7 @@ - '"autoscaling:CreateLaunchConfiguration" not in lc_2_create_idem.resource_actions' - name: Create launch configuration 3 - test throughput parameter - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc3' image_id: '{{ ec2_ami_id }}' instance_type: '{{ ec2_instance_type }}' @@ -122,7 +122,7 @@ register: lc_3_create - name: Gather information about launch configuration 3 - community.aws.ec2_lc_info: + community.aws.autoscaling_launch_config_info: name: '{{ resource_prefix }}-lc3' register: lc_3_info_result @@ -137,7 +137,7 @@ - '"autoscaling:CreateLaunchConfiguration" in lc_3_create.resource_actions' - name: Create launch configuration 3 - Idempotency - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc3' image_id: '{{ ec2_ami_id }}' instance_type: '{{ ec2_instance_type }}' @@ -155,7 +155,7 @@ - '"autoscaling:CreateLaunchConfiguration" not in lc_3_create_idem.resource_actions' - name: Search for the Launch Configurations that start with test resource_prefix - community.aws.ec2_lc_find: + community.aws.autoscaling_launch_config_find: name_regex: '{{ resource_prefix }}*' sort_order: descending register: lc_find_result @@ -166,7 +166,7 @@ - '"autoscaling:DescribeLaunchConfigurations" in lc_find_result.resource_actions' - name: Delete launch configuration 1 - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc1' state: absent register: lc_1_delete @@ -177,7 +177,7 @@ - '"autoscaling:DeleteLaunchConfiguration" in lc_1_delete.resource_actions' - name: Delete launch configuration 1 - Idempotency - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc1' state: absent register: lc_1_delete_idem @@ -188,7 +188,7 @@ - '"autoscaling:DeleteLaunchConfiguration" not in lc_1_delete_idem.resource_actions' - name: Gather information about launch configuration 1 - 
community.aws.ec2_lc_info: + community.aws.autoscaling_launch_config_info: name: '{{ resource_prefix }}-lc1' register: lc_1_info_result @@ -198,7 +198,7 @@ - lc_1_info_result.launch_configurations | length == 0 - name: Delete launch configuration 2 - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc2' state: absent register: lc_2_delete @@ -209,7 +209,7 @@ - '"autoscaling:DeleteLaunchConfiguration" in lc_2_delete.resource_actions' - name: Delete launch configuration 2 - Idempotency - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc2' state: absent register: lc_2_delete_idem @@ -220,7 +220,7 @@ - '"autoscaling:DeleteLaunchConfiguration" not in lc_2_delete_idem.resource_actions' - name: Gather information about launch configuration 2 - community.aws.ec2_lc_info: + community.aws.autoscaling_launch_config_info: name: '{{ resource_prefix }}-lc2' register: lc_2_info_result @@ -230,7 +230,7 @@ - lc_2_info_result.launch_configurations | length == 0 - name: Delete launch configuration 3 - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc3' state: absent register: lc_3_delete @@ -241,7 +241,7 @@ - '"autoscaling:DeleteLaunchConfiguration" in lc_3_delete.resource_actions' - name: Delete launch configuration 3 - Idempotency - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: '{{ resource_prefix }}-lc3' state: absent register: lc_3_delete_idem @@ -252,7 +252,7 @@ - '"autoscaling:DeleteLaunchConfiguration" not in lc_3_delete_idem.resource_actions' - name: Gather information about launch configuration 3 - community.aws.ec2_lc_info: + community.aws.autoscaling_launch_config_info: name: '{{ resource_prefix }}-lc3' register: lc_3_info_result diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/main.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/main.yml index a22182146..e8fdfd37b 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/main.yml @@ -2,40 +2,38 @@ # Beware: most of our tests here are run in parallel. 
# To add new tests you'll need to add a new host to the inventory and a matching # '{{ inventory_hostname }}'.yml file in roles/ec2_asg_lifecycle_hook/tasks/ - - # Prepare the VPC and figure out which AMI to use - hosts: all - gather_facts: no + gather_facts: false tasks: - - module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" - region: "{{ aws_region }}" - vars: + - module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + vars: # We can't just use "run_once" because the facts don't propagate when # running an 'include' that was run_once - setup_run_once: yes - block: - - include_role: - name: 'setup_ec2_facts' - - include_role: - name: 'ec2_asg_lifecycle_hook' - tasks_from: env_setup.yml - rescue: - - include_role: - name: 'ec2_asg_lifecycle_hook' - tasks_from: env_cleanup.yml - run_once: yes - - fail: - msg: 'Environment preparation failed' - run_once: yes + setup_run_once: true + block: + - ansible.builtin.include_role: + name: setup_ec2_facts + - ansible.builtin.include_role: + name: ec2_asg_lifecycle_hook + tasks_from: env_setup.yml + rescue: + - ansible.builtin.include_role: + name: ec2_asg_lifecycle_hook + tasks_from: env_cleanup.yml + run_once: true + - ansible.builtin.fail: + msg: Environment preparation failed + run_once: true # VPC should get cleaned up once all hosts have run - hosts: all - gather_facts: no + gather_facts: false strategy: free serial: 6 roles: diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/meta/main.yml index 1471b11f6..fcadd50dc 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/meta/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - setup_ec2_facts diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/create_update_delete.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/create_update_delete.yml index 800ee6358..f6b92213e 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/create_update_delete.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/create_update_delete.yml @@ -2,47 +2,46 @@ - name: Test create/update/delete AutoScalingGroups Lifecycle Hooks with ec2_asg_lifecycle_hook block: - #---------------------------------------------------------------------- - - name: create a launch configuration - ec2_lc: + # ---------------------------------------------------------------------- + - name: Create a launch configuration + community.aws.autoscaling_launch_config: name: "{{ resource_prefix }}-lc" image_id: "{{ ec2_ami_id }}" region: "{{ aws_region }}" instance_type: t2.micro - assign_public_ip: yes + assign_public_ip: true register: create_lc - - name: ensure that lc is created - assert: + - name: Ensure that lc is created + ansible.builtin.assert: that: - create_lc is changed - create_lc.failed 
is false - #---------------------------------------------------------------------- - - name: create a AutoScalingGroup - ec2_asg: + # ---------------------------------------------------------------------- + - name: Create an AutoScalingGroup + amazon.aws.autoscaling_group: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" health_check_period: 60 health_check_type: ELB - replace_all_instances: yes + replace_all_instances: true min_size: 1 max_size: 1 desired_capacity: 1 region: "{{ aws_region }}" register: create_asg - - name: ensure that AutoScalingGroup is created - assert: + - name: Ensure that AutoScalingGroup is created + ansible.builtin.assert: that: - create_asg is changed - create_asg.failed is false - '"autoscaling:CreateAutoScalingGroup" in create_asg.resource_actions' - #---------------------------------------------------------------------- - + # ---------------------------------------------------------------------- - name: Create lifecycle hook - check_mode - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" @@ -53,7 +52,7 @@ check_mode: true register: output - - assert: + - ansible.builtin.assert: that: - output is changed - output is not failed @@ -61,7 +60,7 @@ - '"Would have created AutoScalingGroup Lifecycle Hook if not in check_mode" in output.msg' - name: Create lifecycle hook - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" @@ -71,7 +70,7 @@ state: present register: output - - assert: + - ansible.builtin.assert: that: - output is changed - output is not failed @@ -79,7 +78,7 @@ - output.lifecycle_hook_info[0].heartbeat_timeout == 7000 - name: Create lifecycle hook - Idempotency - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" @@ -89,14 +88,14 @@ state: present register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - output is not failed - '"lifecycle_hook_info" not in output' - name: Create lifecycle hook - check_mode (Idempotency) - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" @@ -107,14 +106,14 @@ check_mode: true register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - output is not failed - '"lifecycle_hook_info" not in output' - name: Update lifecycle hook - check_mode - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" @@ -125,7 +124,7 @@ check_mode: true register: output - - assert: + - ansible.builtin.assert: that: - output is changed - output is not failed @@ -133,7 +132,7 @@ - '"Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode." 
in output.msg' - name: Update lifecycle hook - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" @@ -143,7 +142,7 @@ state: present register: output - - assert: + - ansible.builtin.assert: that: - output is changed - output is not failed @@ -151,7 +150,7 @@ - output.lifecycle_hook_info[0].heartbeat_timeout == 6000 - name: Update lifecycle hook - Idempotency - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" @@ -161,14 +160,14 @@ state: present register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - output is not failed - '"lifecycle_hook_info" not in output' - name: Update lifecycle hook - check_mode (Idempotency) - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" @@ -179,14 +178,14 @@ check_mode: true register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - output is not failed - '"lifecycle_hook_info" not in output' - name: Delete lifecycle hook - check_mode - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" @@ -194,7 +193,7 @@ check_mode: true register: output - - assert: + - ansible.builtin.assert: that: - output is changed - output is not failed @@ -202,35 +201,35 @@ - '"Would have deleted AutoScalingGroup Lifecycle Hook if not in check_mode." 
in output.msg' - name: Delete lifecycle hook - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output is changed - output is not failed - '"lifecycle_hook_removed" in output' - name: Delete lifecycle hook - Idempotency - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - output is not failed - '"lifecycle_hook_removed" not in output' - name: Delete lifecycle hook - check_mode (Idempotency) - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: "{{ aws_region }}" autoscaling_group_name: "{{ resource_prefix }}-asg" lifecycle_hook_name: "{{ resource_prefix }}-test-hook" @@ -238,7 +237,7 @@ check_mode: true register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - output is not failed diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/env_cleanup.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/env_cleanup.yml index 3b4ee869b..1befe278a 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/env_cleanup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/env_cleanup.yml @@ -1,5 +1,6 @@ -- name: kill asg - ec2_asg: +--- +- name: Kill asg + amazon.aws.autoscaling_group: name: "{{ resource_prefix }}-asg" state: absent register: removed @@ -8,8 +9,8 @@ retries: 10 # Remove the testing dependencies -- name: remove target group - elb_target_group: +- name: Remove target group + community.aws.elb_target_group: name: "{{ item }}" state: absent register: removed @@ -20,8 +21,8 @@ - "{{ tg1_name }}" - "{{ tg2_name }}" -- name: remove the load balancer - ec2_elb_lb: +- name: Remove the load balancer + amazon.aws.elb_classic_lb: name: "{{ load_balancer_name }}" state: absent security_group_ids: @@ -34,20 +35,20 @@ load_balancer_port: 80 instance_port: 80 health_check: - ping_protocol: tcp - ping_port: 80 - ping_path: "/" - response_timeout: 5 - interval: 10 - unhealthy_threshold: 4 - healthy_threshold: 2 + ping_protocol: tcp + ping_port: 80 + ping_path: / + response_timeout: 5 + interval: 10 + unhealthy_threshold: 4 + healthy_threshold: 2 register: removed until: removed is not failed ignore_errors: true retries: 10 -- name: remove launch configs - ec2_lc: +- name: Remove launch configs + community.aws.autoscaling_launch_config: name: "{{ item }}" state: absent register: removed @@ -57,8 +58,8 @@ loop: - "{{ resource_prefix }}-lc" -- name: delete launch template - ec2_launch_template: +- name: Delete launch template + community.aws.ec2_launch_template: name: "{{ resource_prefix }}-lt" state: absent register: del_lt @@ -66,8 +67,8 @@ until: del_lt is not failed ignore_errors: true -- name: remove the security group - ec2_group: +- name: Remove the security group + amazon.aws.ec2_security_group: name: "{{ 
resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" @@ -77,14 +78,14 @@ ignore_errors: true retries: 10 -- name: remove routing rules - ec2_vpc_route_table: +- name: Remove routing rules + amazon.aws.ec2_vpc_route_table: state: absent vpc_id: "{{ testing_vpc.vpc.id }}" tags: created: "{{ resource_prefix }}-route" routes: - - dest: 0.0.0.0/0 + - dest: "0.0.0.0/0" gateway_id: "{{ igw.gateway_id }}" subnets: - "{{ testing_subnet.subnet.id }}" @@ -93,8 +94,8 @@ ignore_errors: true retries: 10 -- name: remove internet gateway - ec2_vpc_igw: +- name: Remove internet gateway + amazon.aws.ec2_vpc_igw: vpc_id: "{{ testing_vpc.vpc.id }}" state: absent register: removed @@ -102,8 +103,8 @@ ignore_errors: true retries: 10 -- name: remove the subnet - ec2_vpc_subnet: +- name: Remove the subnet + amazon.aws.ec2_vpc_subnet: state: absent vpc_id: "{{ testing_vpc.vpc.id }}" cidr: 10.55.77.0/24 @@ -112,8 +113,8 @@ ignore_errors: true retries: 10 -- name: remove the VPC - ec2_vpc_net: +- name: Remove the VPC + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc" cidr_block: 10.55.77.0/24 state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/env_setup.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/env_setup.yml index 8e9be1d55..d51654310 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/env_setup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/env_setup.yml @@ -1,25 +1,25 @@ +--- - name: Run ec2_asg_lifecycle_hook integration tests. 
block: - # ============================================================ # Set up the testing dependencies: VPC, subnet, security group, and two launch configurations - name: Create VPC for use in testing - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc" cidr_block: 10.55.77.0/24 tenancy: default register: testing_vpc - name: Create internet gateway for use in testing - ec2_vpc_igw: + amazon.aws.ec2_vpc_igw: vpc_id: "{{ testing_vpc.vpc.id }}" state: present register: igw - name: Create subnet for use in testing - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: state: present vpc_id: "{{ testing_vpc.vpc.id }}" cidr: 10.55.77.0/24 @@ -28,19 +28,19 @@ Name: "{{ resource_prefix }}-subnet" register: testing_subnet - - name: create routing rules - ec2_vpc_route_table: + - name: Create routing rules + amazon.aws.ec2_vpc_route_table: vpc_id: "{{ testing_vpc.vpc.id }}" tags: created: "{{ resource_prefix }}-route" routes: - - dest: 0.0.0.0/0 + - dest: "0.0.0.0/0" gateway_id: "{{ igw.gateway_id }}" subnets: - "{{ testing_subnet.subnet.id }}" - - name: create a security group with the vpc created in the ec2_setup - ec2_group: + - name: Create a security group with the vpc created in the ec2_setup + amazon.aws.ec2_security_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" @@ -48,9 +48,9 @@ - proto: tcp from_port: 22 to_port: 22 - cidr_ip: 0.0.0.0/0 + cidr_ip: "0.0.0.0/0" - proto: tcp from_port: 80 to_port: 80 - cidr_ip: 0.0.0.0/0 + cidr_ip: "0.0.0.0/0" register: sg diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/main.yml index 16442c7fa..e38324bda 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_lifecycle_hook/roles/ec2_asg_lifecycle_hook/tasks/main.yml @@ -3,38 +3,36 @@ # To add new tests you'll need to add a new host to the inventory and a matching # '{{ inventory_hostname }}'.yml file in roles/ec2_asg_lifecycle_hook/tasks/ -- name: "Wrap up all tests and setup AWS credentials" +- name: Wrap up all tests and setup AWS credentials module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" aws_config: retries: # Unfortunately AWSRetry doesn't support paginators and boto3's paginators # don't support any configuration of the delay between retries. 
max_attempts: 20 - collections: - - community.aws block: - - debug: - msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}" - - include_tasks: '{{ inventory_hostname }}.yml' - - debug: - msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}" + - ansible.builtin.debug: + msg: "{{ inventory_hostname }} start: {{ lookup('pipe', 'date') }}" + - ansible.builtin.include_tasks: "{{ inventory_hostname }}.yml" + - ansible.builtin.debug: + msg: "{{ inventory_hostname }} finish: {{ lookup('pipe', 'date') }}" always: - - set_fact: - _role_complete: True + - ansible.builtin.set_fact: + _role_complete: true - vars: completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}' - hosts_in_play: '{{ ansible_play_hosts_all | length }}' - debug: + hosts_in_play: "{{ ansible_play_hosts_all | length }}" + ansible.builtin.debug: msg: "{{ completed_hosts }} of {{ hosts_in_play }} complete" - - include_tasks: env_cleanup.yml + - ansible.builtin.include_tasks: env_cleanup.yml vars: completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}' - hosts_in_play: '{{ ansible_play_hosts_all | length }}' + hosts_in_play: "{{ ansible_play_hosts_all | length }}" when: - - completed_hosts == hosts_in_play + - completed_hosts == hosts_in_play diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_policy/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_policy/tasks/main.yml index 24b3eea62..684522d64 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_policy/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_policy/tasks/main.yml @@ -12,22 +12,22 @@ - module_defaults: group/aws: region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" collections: - amazon.aws block: - name: create trivial launch_configuration - ec2_lc: + autoscaling_launch_config: name: "{{ scaling_policy_lc_name }}" state: present instance_type: t3.nano image_id: "{{ ec2_ami_id }}" - name: create trivial ASG - ec2_asg: + autoscaling_group: name: "{{ scaling_policy_asg_name }}" state: present launch_config_name: "{{ scaling_policy_lc_name }}" @@ -36,7 +36,7 @@ desired_capacity: 0 - name: Create Simple Scaling policy using implicit defaults - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_simplescaling_policy" asg_name: "{{ scaling_policy_asg_name }}" state: present @@ -46,11 +46,11 @@ - assert: that: - - result.policy_name == "{{ resource_prefix }}_simplescaling_policy" + - result.policy_name == resource_prefix ~ '_simplescaling_policy' - result.changed - name: Update Simple Scaling policy using explicit defaults - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_simplescaling_policy" asg_name: "{{ scaling_policy_asg_name }}" state: present @@ -61,11 +61,11 @@ - assert: that: - - result.policy_name == "{{ resource_prefix }}_simplescaling_policy" + - result.policy_name == resource_prefix ~ '_simplescaling_policy' - not result.changed - name: min_adjustment_step is ignored with ChangeInCapacity - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix 
}}_simplescaling_policy" asg_name: "{{ scaling_policy_asg_name }}" state: present @@ -77,12 +77,12 @@ - assert: that: - - result.policy_name == "{{ resource_prefix }}_simplescaling_policy" + - result.policy_name == resource_prefix ~ '_simplescaling_policy' - not result.changed - result.adjustment_type == "ChangeInCapacity" - name: Change Simple Scaling policy adjustment_type to PercentChangeInCapacity - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_simplescaling_policy" asg_name: "{{ scaling_policy_asg_name }}" state: present @@ -94,12 +94,12 @@ - assert: that: - - result.policy_name == "{{ resource_prefix }}_simplescaling_policy" + - result.policy_name == resource_prefix ~ '_simplescaling_policy' - result.changed - result.adjustment_type == "PercentChangeInCapacity" - name: Remove Simple Scaling policy - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_simplescaling_policy" asg_name: "{{ scaling_policy_asg_name }}" state: absent @@ -110,7 +110,7 @@ - result.changed - name: Create Step Scaling policy - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_stepscaling_policy" asg_name: "{{ scaling_policy_asg_name }}" state: present @@ -126,11 +126,11 @@ - assert: that: - - result.policy_name == "{{ resource_prefix }}_stepscaling_policy" + - result.policy_name == resource_prefix ~ '_stepscaling_policy' - result.changed - name: Add another step - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_stepscaling_policy" asg_name: "{{ scaling_policy_asg_name }}" state: present @@ -149,12 +149,12 @@ - assert: that: - - result.policy_name == "{{ resource_prefix }}_stepscaling_policy" + - result.policy_name == resource_prefix ~ '_stepscaling_policy' - result.changed - result.adjustment_type == "PercentChangeInCapacity" - name: Remove Step Scaling policy - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_stepscaling_policy" asg_name: "{{ scaling_policy_asg_name }}" state: absent @@ -165,7 +165,7 @@ - result.changed - name: Remove Step Scaling policy (idemopotency) - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_stepscaling_policy" asg_name: "{{ scaling_policy_asg_name }}" state: absent @@ -177,7 +177,7 @@ - result is successful - name: create TargetTracking predefined policy - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_targettracking_predefined_policy" policy_type: TargetTrackingScaling target_tracking_config: @@ -189,12 +189,12 @@ - assert: that: - - result.policy_name == "{{ resource_prefix }}_targettracking_predefined_policy" + - result.policy_name == resource_prefix ~ '_targettracking_predefined_policy' - result.changed - result is successful - name: create TargetTrackingScaling predefined policy (idempotency) - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_targettracking_predefined_policy" policy_type: TargetTrackingScaling target_tracking_config: @@ -206,12 +206,12 @@ - assert: that: - - result.policy_name == "{{ resource_prefix }}_targettracking_predefined_policy" + - result.policy_name == resource_prefix ~ '_targettracking_predefined_policy' - result is not changed # # It would be good to also test this but we would need an Target group and an ALB # - name: create TargetTracking predefined policy with resource_label -# ec2_scaling_policy: +# autoscaling_policy: # name: "{{ resource_prefix }}_targettracking_predefined_rl_policy" # policy_type: TargetTrackingScaling # target_tracking_config: @@ -229,7 +229,7 @@ # - 
result is successful # # - name: create TargetTracking predefined policy with resource_label (idempotency) -# ec2_scaling_policy: +# autoscaling_policy: # name: "{{ resource_prefix }}_targettracking_predefined_rl_policy" # policy_type: TargetTrackingScaling # target_tracking_config: @@ -246,7 +246,7 @@ # - result is not changed - name: create TargetTrackingScaling custom policy - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_targettracking_custom_policy" policy_type: TargetTrackingScaling target_tracking_config: @@ -263,12 +263,12 @@ - assert: that: - - result.policy_name == "{{ resource_prefix }}_targettracking_custom_policy" + - result.policy_name == resource_prefix ~ '_targettracking_custom_policy' - result.changed - result is successful - name: create TargetTrackingScaling custom policy (idempotency) - ec2_scaling_policy: + autoscaling_policy: name: "{{ resource_prefix }}_targettracking_custom_policy" policy_type: TargetTrackingScaling target_tracking_config: @@ -285,14 +285,14 @@ - assert: that: - - result.policy_name == "{{ resource_prefix }}_targettracking_custom_policy" + - result.policy_name == resource_prefix ~ '_targettracking_custom_policy' - result is not changed always: # ============================================================ - name: Remove the scaling policies - ec2_scaling_policy: + autoscaling_policy: name: "{{ item }}" state: absent register: result @@ -305,13 +305,13 @@ ignore_errors: yes - name: remove the ASG - ec2_asg: + autoscaling_group: name: "{{ scaling_policy_asg_name }}" state: absent ignore_errors: yes - name: remove the Launch Configuration - ec2_lc: + autoscaling_launch_config: name: "{{ scaling_policy_lc_name }}" state: absent ignore_errors: yes diff --git a/ansible_collections/community/aws/tests/integration/targets/autoscaling_scheduled_action/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/autoscaling_scheduled_action/tasks/main.yml index c78c7efae..4c0e97220 100644 --- a/ansible_collections/community/aws/tests/integration/targets/autoscaling_scheduled_action/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/autoscaling_scheduled_action/tasks/main.yml @@ -5,9 +5,9 @@ - community.aws module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: ## Set up the testing dependencies: VPC, subnet, security group, and launch configuration @@ -29,7 +29,7 @@ register: testing_subnet - name: create a security group with the vpc created in the ec2_setup - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" @@ -45,7 +45,7 @@ register: sg - name: ensure launch configs exist - ec2_lc: + autoscaling_launch_config: name: "{{ resource_prefix }}-lc" assign_public_ip: true image_id: "{{ ec2_ami_id }}" @@ -53,7 +53,7 @@ instance_type: t3.micro - name: Create ASG ready - ec2_asg: + autoscaling_group: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" desired_capacity: 1 @@ -70,10 +70,10 @@ ## Create minimal basic scheduled action - name: Create basic scheduled_action - check_mode - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" 
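# start_time must lie in the future; AutoScaling rejects scheduled actions
# whose start date has already passed, hence the far-future dates used below.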
scheduled_action_name: "{{ resource_prefix }}-test" - start_time: 2022 October 25 08:00 UTC + start_time: 2027 November 9 08:00 UTC recurrence: 40 22 * * 1-5 desired_capacity: 2 state: present @@ -87,10 +87,10 @@ - scheduled_action is changed - name: Create basic scheduled_action - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" scheduled_action_name: "{{ resource_prefix }}-test" - start_time: 2022 October 25 08:00 UTC + start_time: 2027 November 9 08:00 UTC recurrence: 40 22 * * 1-5 desired_capacity: 2 state: present @@ -101,14 +101,14 @@ that: - scheduled_action is successful - scheduled_action is changed - - scheduled_action.scheduled_action_name == "{{ resource_prefix }}-test" + - scheduled_action.scheduled_action_name == resource_prefix ~ '-test' - scheduled_action.desired_capacity == 2 - name: Create basic scheduled_action - idempotent - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" scheduled_action_name: "{{ resource_prefix }}-test" - start_time: 2022 October 25 08:00 UTC + start_time: 2027 November 9 08:00 UTC recurrence: 40 22 * * 1-5 desired_capacity: 2 state: present @@ -122,10 +122,10 @@ ## Update minimal basic scheduled action - name: Update basic scheduled_action - check_mode - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" scheduled_action_name: "{{ resource_prefix }}-test" - start_time: 2022 October 25 08:00 UTC + start_time: 2027 November 9 08:00 UTC recurrence: 40 22 * * 1-5 desired_capacity: 3 min_size: 3 @@ -140,10 +140,10 @@ - scheduled_action is changed - name: Update basic scheduled_action - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" scheduled_action_name: "{{ resource_prefix }}-test" - start_time: 2022 October 25 08:00 UTC + start_time: 2027 November 9 08:00 UTC recurrence: 40 22 * * 1-5 desired_capacity: 3 min_size: 3 @@ -155,15 +155,15 @@ that: - scheduled_action is successful - scheduled_action is changed - - scheduled_action.scheduled_action_name == "{{ resource_prefix }}-test" + - scheduled_action.scheduled_action_name == resource_prefix ~ '-test' - scheduled_action.desired_capacity == 3 - scheduled_action.min_size == 3 - name: Update basic scheduled_action - idempotent - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" scheduled_action_name: "{{ resource_prefix }}-test" - start_time: 2022 October 25 08:00 UTC + start_time: 2027 November 9 08:00 UTC recurrence: 40 22 * * 1-5 desired_capacity: 3 min_size: 3 @@ -178,11 +178,11 @@ ## Create advanced scheduled action - name: Create advanced scheduled_action - check_mode - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" scheduled_action_name: "{{ resource_prefix }}-test" - start_time: 2022 October 25 09:00 UTC - end_time: 2022 October 25 10:00 UTC + start_time: 2027 November 9 09:00 UTC + end_time: 2027 November 9 10:00 UTC time_zone: Europe/London recurrence: 40 22 * * 1-5 min_size: 2 @@ -199,11 +199,11 @@ - advanced_scheduled_action is changed - name: Create advanced scheduled_action - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" scheduled_action_name: "{{ resource_prefix }}-test1" - start_time: 2022 October 25 09:00 UTC - end_time: 2022 October 25 10:00 UTC + start_time: 2027 
November 9 09:00 UTC + end_time: 2027 November 9 10:00 UTC time_zone: Europe/London recurrence: 40 22 * * 1-5 min_size: 2 @@ -217,18 +217,18 @@ that: - advanced_scheduled_action is successful - advanced_scheduled_action is changed - - advanced_scheduled_action.scheduled_action_name == "{{ resource_prefix }}-test1" + - advanced_scheduled_action.scheduled_action_name == resource_prefix ~ '-test1' - advanced_scheduled_action.desired_capacity == 2 - advanced_scheduled_action.min_size == 2 - advanced_scheduled_action.max_size == 5 - advanced_scheduled_action.time_zone == "Europe/London" - name: Create advanced scheduled_action - idempotent - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" scheduled_action_name: "{{ resource_prefix }}-test1" - start_time: 2022 October 25 09:00 UTC - end_time: 2022 October 25 10:00 UTC + start_time: 2027 November 9 09:00 UTC + end_time: 2027 November 9 10:00 UTC time_zone: Europe/London recurrence: 40 22 * * 1-5 min_size: 2 @@ -245,7 +245,7 @@ ## Delete scheduled action - name: Delete scheduled_action - check_mode - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" scheduled_action_name: "{{ resource_prefix }}-test1" state: absent @@ -259,7 +259,7 @@ - scheduled_action_deletion is changed - name: Delete scheduled_action - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" scheduled_action_name: "{{ resource_prefix }}-test1" state: absent @@ -272,7 +272,7 @@ - scheduled_action_deletion is changed - name: Delete scheduled_action - idempotent - ec2_asg_scheduled_action: + autoscaling_scheduled_action: autoscaling_group_name: "{{ resource_prefix }}-asg" scheduled_action_name: "{{ resource_prefix }}-test1" state: absent @@ -285,7 +285,7 @@ - scheduled_action_deletion is not changed always: - name: Remove ASG - ec2_asg: + autoscaling_group: name: "{{ resource_prefix }}-asg" state: absent register: removed @@ -295,7 +295,7 @@ # Remove the testing dependencies - name: Remove launch configs - ec2_lc: + autoscaling_launch_config: name: "{{ resource_prefix }}-lc" state: absent register: removed @@ -304,7 +304,7 @@ retries: 10 - name: Remove the security group - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/aws_region_info/main.yml b/ansible_collections/community/aws/tests/integration/targets/aws_region_info/main.yml deleted file mode 100644 index abffda916..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/aws_region_info/main.yml +++ /dev/null @@ -1,5 +0,0 @@ -- hosts: localhost - connection: local - environment: "{{ ansible_test.environment }}" - tasks: - - include_tasks: 'tasks/tests.yml' diff --git a/ansible_collections/community/aws/tests/integration/targets/aws_region_info/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/aws_region_info/tasks/main.yml deleted file mode 100644 index 3edbbaded..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/aws_region_info/tasks/main.yml +++ /dev/null @@ -1,107 +0,0 @@ ---- -- module_defaults: - group/aws: - aws_access_key: '{{ aws_access_key | default(omit) }}' - aws_secret_key: '{{ aws_secret_key | default(omit) }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region | 
default(omit) }}' - - block: - - name: 'List available Regions' - aws_region_info: - register: regions - - - name: check task return attributes - vars: - first_region: '{{ regions.regions[0] }}' - assert: - that: - - regions is successful - - regions is not changed - - '"regions" in regions' - - '"endpoint" in first_region' - - '"opt_in_status" in first_region' - - '"region_name" in first_region' - - - name: 'List available Regions - check_mode' - aws_region_info: - register: check_regions - - - name: check task return attributes - check_mode - vars: - first_region: '{{ check_regions.regions[0] }}' - assert: - that: - - check_regions is successful - - check_regions is not changed - - '"regions" in check_regions' - - '"endpoint" in first_region' - - '"opt_in_status" in first_region' - - '"region_name" in first_region' - - - name: 'Filter available Regions using - ("region-name")' - aws_region_info: - filters: - region-name: 'us-west-1' - register: us_west_1 - - - name: check task return attributes - filtering using - - vars: - first_region: '{{ us_west_1.regions[0] }}' - assert: - that: - - us_west_1 is successful - - us_west_1 is not changed - - '"regions" in us_west_1' - - us_west_1.regions | length == 1 - - '"endpoint" in first_region' - - first_region.endpoint == 'ec2.us-west-1.amazonaws.com' - - '"opt_in_status" in first_region' - - first_region.opt_in_status == 'opt-in-not-required' - - '"region_name" in first_region' - - first_region.region_name == 'us-west-1' - - - name: 'Filter available Regions using _ ("region_name")' - aws_region_info: - filters: - region_name: 'us-west-2' - register: us_west_2 - - - name: check task return attributes - filtering using _ - vars: - first_region: '{{ us_west_2.regions[0] }}' - assert: - that: - - us_west_2 is successful - - us_west_2 is not changed - - '"regions" in us_west_2' - - us_west_2.regions | length == 1 - - '"endpoint" in first_region' - - first_region.endpoint == 'ec2.us-west-2.amazonaws.com' - - '"opt_in_status" in first_region' - - first_region.opt_in_status == 'opt-in-not-required' - - '"region_name" in first_region' - - first_region.region_name == 'us-west-2' - - - name: 'Filter available Regions using _ and - to check precedence' - aws_region_info: - filters: - region-name: 'eu-west-1' - region_name: 'eu-central-1' - register: regions_prededence - - - name: check task return attributes - precedence - vars: - first_region: '{{ regions_prededence.regions[0] }}' - assert: - that: - - regions_prededence is successful - - regions_prededence is not changed - - '"regions" in regions_prededence' - - regions_prededence.regions | length == 1 - - '"endpoint" in first_region' - - first_region.endpoint == 'ec2.eu-central-1.amazonaws.com' - - '"opt_in_status" in first_region' - - first_region.opt_in_status == 'opt-in-not-required' - - '"region_name" in first_region' - - first_region.region_name == 'eu-central-1' diff --git a/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/tasks/main.yml index eb703d49e..f1b99df1b 100644 --- a/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/tasks/main.yml @@ -1,9 +1,9 @@ - name: set connection information for aws modules and run tasks module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ 
aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" collections: - amazon.aws diff --git a/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/tasks/main.yml index afd614a55..39f13a71f 100644 --- a/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/tasks/main.yml @@ -5,14 +5,14 @@ - name: set up aws connection info set_fact: aws_connection_info: &aws_connection_info - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" aws_secondary_connection_info: &aws_secondary_connection_info - aws_access_key: "{{ secondary_aws_access_key }}" - aws_secret_key: "{{ secondary_aws_secret_key }}" - security_token: "{{ secondary_security_token }}" + access_key: "{{ secondary_aws_access_key }}" + secret_key: "{{ secondary_aws_secret_key }}" + session_token: "{{ secondary_security_token | default(omit) }}" region: "{{ aws_region }}" no_log: true diff --git a/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/aliases b/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/aliases index e04e1b287..4ef4b2067 100644 --- a/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/aliases +++ b/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/aliases @@ -1,4 +1 @@ -# reason: broken -disabled - cloud/aws diff --git a/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/tasks/main.yml index a6ac0571a..281097db1 100644 --- a/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/tasks/main.yml @@ -1,8 +1,8 @@ - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" cloudfront_distribution: alias: "{{ cloudfront_alias | default(omit) }}" viewer_certificate: "{{ cloudfront_viewer_cert | default(omit) }}" @@ -19,12 +19,18 @@ default_cache_behavior: target_origin_id: "{{ cloudfront_hostname }}-origin.example.com" state: present - purge_origins: yes + purge_origins: true register: cf_distribution - set_fact: distribution_id: '{{ cf_distribution.id }}' + - name: ensure that default value of 'enabled' is 'true' + assert: + that: + - cf_distribution.changed + - cf_distribution.enabled + - name: ensure that default value of 'ipv6_enabled' is 'false' assert: that: @@ -49,7 +55,7 @@ cloudfront_distribution: state: present distribution_id: "{{ distribution_id }}" - ipv6_enabled: True + ipv6_enabled: true register: cf_update_ipv6 - name: 
ensure the 'ipv6_enabled' value has changed (new value is true) @@ -76,7 +82,7 @@ cloudfront_distribution: state: present distribution_id: "{{ distribution_id }}" - ipv6_enabled: True + ipv6_enabled: true register: cf_update_ipv6 - name: ensure the 'ipv6_enabled' value has changed (new value is true) @@ -86,45 +92,122 @@ # - not cf_update_ipv6.changed - cf_update_ipv6.is_ipv6_enabled - - name: re-run cloudfront distribution with same defaults + - name: Ensure that default value of 'http_version' is 'http2' + assert: + that: + - cf_update_ipv6.http_version == 'http2' + + - name: Update the distribution http_version to http2and3 + cloudfront_distribution: + state: present + distribution_id: "{{ distribution_id }}" + http_version: http2and3 + register: cf_update_http_version + + - name: Ensure that 'http_version' was updated to 'http2and3' + assert: + that: + - cf_update_http_version.changed + - cf_update_http_version.http_version == 'http2and3' + + # - name: re-run cloudfront distribution with same defaults + # cloudfront_distribution: + # distribution_id: "{{ distribution_id }}" + # origins: + # - domain_name: "{{ cloudfront_hostname }}-origin.example.com" + # state: present + # register: cf_dist_no_update + + # - name: ensure distribution was not updated + # assert: + # that: + # - not cf_dist_no_update.changed + + # - name: re-run cloudfront distribution using distribution id + # cloudfront_distribution: + # distribution_id: "{{ distribution_id }}" + # purge_origins: no + # state: present + # register: cf_dist_with_id + + # - name: ensure distribution was not updated + # assert: + # that: + # - not cf_dist_with_id.changed + + - name: update origin http port cloudfront_distribution: distribution_id: "{{ distribution_id }}" origins: - domain_name: "{{ cloudfront_hostname }}-origin.example.com" + custom_origin_config: + http_port: 8080 state: present - register: cf_dist_no_update + register: update_origin_http_port - - name: ensure distribution was not updated + - name: ensure http port was updated assert: that: - - not cf_dist_no_update.changed + - update_origin_http_port.changed - - name: re-run cloudfront distribution using distribution id + - name: enable origin Origin Shield cloudfront_distribution: distribution_id: "{{ distribution_id }}" - purge_origins: no + origins: + - domain_name: "{{ cloudfront_hostname }}-origin.example.com" + custom_origin_config: + http_port: 8080 + origin_shield: + enabled: true + origin_shield_region: '{{ aws_region }}' state: present - register: cf_dist_with_id + register: update_origin_origin_shield - - name: ensure distribution was not updated + - name: ensure origin Origin Shield was enabled assert: that: - - not cf_dist_with_id.changed - - - name: update origin http port + - update_origin_origin_shield.changed + - update_origin_origin_shield.origins['items'][0].origin_shield.enabled + - update_origin_origin_shield.origins['items'][0].origin_shield.origin_shield_region == aws_region + + # TODO: fix module idempotency issue + # - name: enable origin Origin Shield again to test idempotency + # cloudfront_distribution: + # distribution_id: "{{ distribution_id }}" + # origins: + # - domain_name: "{{ cloudfront_hostname }}-origin.example.com" + # custom_origin_config: + # http_port: 8080 + # origin_shield: + # enabled: true + # origin_shield_region: '{{ aws_region }}' + # state: present + # register: update_origin_origin_shield_idempotency + + # - name: test idempotency for Origin Shield + # assert: + # that: + # - not 
update_origin_origin_shield_idempotency.changed + # - update_origin_origin_shield_idempotency.origins['items'][0].origin_shield.enabled + # - update_origin_origin_shield_idempotency.origins['items'][0].origin_shield.origin_shield_region == '{{ aws_region }}' + + - name: disable origin Origin Shield cloudfront_distribution: distribution_id: "{{ distribution_id }}" origins: - domain_name: "{{ cloudfront_hostname }}-origin.example.com" custom_origin_config: http_port: 8080 + origin_shield: + enabled: false state: present - register: update_origin_http_port + register: update_origin_origin_shield_disable - - name: ensure http port was updated + - name: ensure origin Origin Shield was disabled assert: that: - - update_origin_http_port.changed + - update_origin_origin_shield_disable.changed + - not update_origin_origin_shield_disable.origins['items'][0].origin_shield.enabled - name: update restrictions cloudfront_distribution: @@ -167,7 +250,7 @@ id: "{{ resource_prefix }}2.example.com" default_root_object: index.html state: present - wait: yes + wait: true register: cf_add_origin - name: ensure origin was added @@ -186,7 +269,7 @@ http_port: 8080 - domain_name: "{{ resource_prefix }}2.example.com" default_root_object: index.html - wait: yes + wait: true state: present register: cf_rerun_second_origin @@ -194,7 +277,7 @@ assert: that: - cf_rerun_second_origin.origins.quantity == 2 - - not cf_rerun_second_origin.changed + # - not cf_rerun_second_origin.changed - name: run with origins in reverse order cloudfront_distribution: @@ -211,7 +294,7 @@ assert: that: - cf_rerun_second_origin_reversed.origins.quantity == 2 - - not cf_rerun_second_origin_reversed.changed + # - not cf_rerun_second_origin_reversed.changed - name: purge first origin @@ -221,7 +304,7 @@ - domain_name: "{{ resource_prefix }}2.example.com" default_cache_behavior: target_origin_id: "{{ resource_prefix }}2.example.com" - purge_origins: yes + purge_origins: true state: present register: cf_purge_origin @@ -278,12 +361,13 @@ - name: delete distribution cloudfront_distribution: distribution_id: "{{ distribution_id }}" - enabled: no - wait: yes + enabled: false + wait: true state: absent - - name: create distribution with tags + - name: create cloudfront distribution with tags and as disabled cloudfront_distribution: + enabled: false origins: - domain_name: "{{ resource_prefix }}2.example.com" id: "{{ resource_prefix }}2.example.com" @@ -296,6 +380,12 @@ - set_fact: distribution_id: '{{ cf_second_distribution.id }}' + - name: ensure that the value of 'enabled' is 'false' + assert: + that: + - cf_second_distribution.changed + - not cf_second_distribution.enabled + - name: ensure tags were set on creation assert: that: @@ -313,14 +403,14 @@ tags: ATag: tag1 Another: tag - purge_tags: yes + purge_tags: true state: present register: rerun_with_purge_tags - name: ensure that re-running didn't change assert: that: - - not rerun_with_purge_tags.changed + # - not rerun_with_purge_tags.changed - rerun_with_purge_tags.tags|length == 2 - name: add new tag to distribution @@ -330,7 +420,7 @@ - domain_name: "{{ resource_prefix }}2.example.com" tags: Third: thing - purge_tags: no + purge_tags: false state: present register: update_with_new_tag @@ -364,7 +454,7 @@ - name: check that reversing cache behaviors changes nothing when purge_cache_behaviors unset assert: that: - - not reverse_cache_behaviors.changed + # - not reverse_cache_behaviors.changed - reverse_cache_behaviors.cache_behaviors|length == 2 - name: reverse some cache behaviors properly 
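A minimal sketch of the two reconciliation modes exercised by the surrounding tasks, reusing the distribution_id and cloudfront_test_cache_behaviors variables these tests already define: merge when purge_cache_behaviors is left unset, and replace in the given order when it is true.

- name: merge mode, existing cache behaviors are kept and order is not enforced
  cloudfront_distribution:
    distribution_id: "{{ distribution_id }}"
    origins:
      - domain_name: "{{ resource_prefix }}2.example.com"
    cache_behaviors: "{{ cloudfront_test_cache_behaviors }}"
    state: present

- name: replace mode, only the listed behaviors survive, in this order
  cloudfront_distribution:
    distribution_id: "{{ distribution_id }}"
    origins:
      - domain_name: "{{ resource_prefix }}2.example.com"
    cache_behaviors: "{{ cloudfront_test_cache_behaviors | reverse | list }}"
    purge_cache_behaviors: true
    state: present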
@@ -373,7 +463,7 @@ origins: - domain_name: "{{ resource_prefix }}2.example.com" cache_behaviors: "{{ cloudfront_test_cache_behaviors|reverse|list }}" - purge_cache_behaviors: yes + purge_cache_behaviors: true state: present register: reverse_cache_behaviors_with_purge @@ -389,10 +479,10 @@ origins: - domain_name: "{{ resource_prefix }}3.example.com" id: "{{ resource_prefix }}3.example.com" - purge_origins: yes + purge_origins: true state: present register: remove_origin_in_use - ignore_errors: yes + ignore_errors: true - name: check that removing in use origin fails assert: @@ -412,18 +502,14 @@ # - path_pattern: /another/path # target_origin_id: "{{ resource_prefix }}3.example.com" # state: present - # aws_access_key: "{{ aws_access_key|default(omit) }}" - # aws_secret_key: "{{ aws_secret_key|default(omit) }}" - # security_token: "{{ security_token|default(omit) }}" - # profile: "{{ profile|default(omit) }}" # register: update_cache_behaviors in use - name: create an s3 bucket for next test # note that although public-read allows reads that we want to stop with origin_access_identity, # we also need to test without origin_access_identity and it's hard to change bucket perms later - aws_s3: - bucket: "{{ resource_prefix }}-bucket" - mode: create + s3_bucket: + name: "{{ resource_prefix }}-bucket" + state: present - name: update origin to point to the s3 bucket cloudfront_distribution: @@ -431,7 +517,7 @@ origins: - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com" id: "{{ resource_prefix }}3.example.com" - s3_origin_access_identity_enabled: yes + s3_origin_access_identity_enabled: true state: present register: update_origin_to_s3 @@ -448,7 +534,7 @@ origins: - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com" id: "{{ resource_prefix }}3.example.com" - s3_origin_access_identity_enabled: no + s3_origin_access_identity_enabled: false state: present register: update_origin_to_s3_without_origin_access @@ -460,9 +546,9 @@ loop: "{{ update_origin_to_s3_without_origin_access.origins['items'] }}" - name: delete the s3 bucket - aws_s3: - bucket: "{{ resource_prefix }}-bucket" - mode: delete + s3_bucket: + name: "{{ resource_prefix }}-bucket" + state: absent - name: check that custom_origin_config can't be used with origin_access_identity enabled cloudfront_distribution: @@ -470,18 +556,64 @@ origins: - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com" id: "{{ resource_prefix }}3.example.com" - s3_origin_access_identity_enabled: yes + s3_origin_access_identity_enabled: true custom_origin_config: origin_protocol_policy: 'http-only' state: present register: update_origin_to_s3_with_origin_access_and_with_custom_origin_config - ignore_errors: True + ignore_errors: true - name: check that custom origin with origin access identity fails + # "s3 origin domains and custom_origin_config are mutually exclusive" + assert: + that: + - update_origin_to_s3_with_origin_access_and_with_custom_origin_config.failed + + - name: check that custom_origin_config can't be used with a region-aware S3 domain + cloudfront_distribution: + distribution_id: "{{ distribution_id }}" + origins: + - domain_name: "{{ resource_prefix }}-bucket.s3.{{ aws_region }}.amazonaws.com" + id: "{{ resource_prefix }}3.example.com" + custom_origin_config: + http_port: 8080 + state: present + register: update_origin_to_s3_with_origin_access_and_with_custom_origin_config + ignore_errors: true + + - name: check that custom origin with region-aware S3 domain fails + # "s3 origin domains and 
custom_origin_config are mutually exclusive" + assert: + that: + - update_origin_to_s3_with_origin_access_and_with_custom_origin_config.failed + + - name: check that custom_origin_config can't be used with a region-agnostic S3 domain + cloudfront_distribution: + distribution_id: "{{ distribution_id }}" + origins: + - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com" + id: "{{ resource_prefix }}3.example.com" + custom_origin_config: + http_port: 8080 + state: present + register: update_origin_to_s3_with_origin_access_and_with_custom_origin_config + ignore_errors: true + + - name: check that custom origin with region-agnostic S3 domain fails + # "s3 origin domains and custom_origin_config are mutually exclusive" assert: that: - update_origin_to_s3_with_origin_access_and_with_custom_origin_config.failed + - name: create cloudfront distribution origin access identity + cloudfront_origin_access_identity: + state: present + comment: "this is a sample origin access identity" + register: _origin_access_id + + - set_fact: + origin_access_identity: 'origin-access-identity/cloudfront/{{ _origin_access_id.cloud_front_origin_access_identity.id }}' + - name: Update distribution to use specific access identity cloudfront_distribution: distribution_id: "{{ distribution_id }}" @@ -490,25 +622,61 @@ domain_name: "{{ resource_prefix }}.s3.amazonaws.com" s3_origin_access_identity_enabled: true s3_origin_config: - origin_access_identity: origin-access-identity/cloudfront/ANYTHING - register: update_distribution_with_specific_access_identity + origin_access_identity: '{{ origin_access_identity }}' + register: result - name: check that custom origin uses the provided origin_access_identity assert: that: - - update_distribution_with_specific_access_identity.changed - - update_distribution_with_specific_access_identity.origins.items[0].s3_origin_config.origin_access_identity == 'origin-access-identity/cloudfront/ANYTHING' + - result.changed + - result.origins['quantity'] > 0 + - result.origins['items'] | selectattr('s3_origin_config', 'defined') | map(attribute='s3_origin_config') | selectattr('origin_access_identity', 'eq', origin_access_identity) | list | length == 1 + + - name: update distribution to use cache_policy_id and origin_request_policy_id + cloudfront_distribution: + distribution_id: "{{ distribution_id }}" + default_cache_behavior: + cache_policy_id: "658327ea-f89d-4fab-a63d-7e88639e58f6" + origin_request_policy_id: "88a5eaf4-2fd4-4709-b370-b4c650ea3fcf" + state: present + register: update_distribution_with_cache_policies + + - name: ensure that the cache_policy_id and origin_request_policy_id were set + assert: + that: + - update_distribution_with_cache_policies.changed + - update_distribution_with_cache_policies.default_cache_behavior.cache_policy_id == '658327ea-f89d-4fab-a63d-7e88639e58f6' + - update_distribution_with_cache_policies.default_cache_behavior.origin_request_policy_id == '88a5eaf4-2fd4-4709-b370-b4c650ea3fcf' always: # TEARDOWN STARTS HERE - name: delete the s3 bucket - aws_s3: - bucket: "{{ resource_prefix }}-bucket" - mode: delete + s3_bucket: + name: "{{ resource_prefix }}-bucket" + state: absent + force: true + ignore_errors: true - name: clean up cloudfront distribution cloudfront_distribution: - distribution_id: "{{ distribution_id }}" - enabled: no - wait: yes + distribution_id: "{{ item }}" + enabled: false + wait: true state: absent + register: delete_distribution + ignore_errors: true + async: 1000 + poll: 0 + with_items: + - '{{ cf_second_distribution.id }}' + - '{{ 
cf_distribution.id }}' + + - name: Wait for cloudfront to be deleted + async_status: + jid: "{{ item.ansible_job_id }}" + register: _delete + until: _delete.finished + retries: 100 + delay: 5 + loop: "{{ delete_distribution.results }}" + ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/cloudfront_invalidation/aliases b/ansible_collections/community/aws/tests/integration/targets/cloudfront_invalidation/aliases new file mode 100644 index 000000000..c282df0b0 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/cloudfront_invalidation/aliases @@ -0,0 +1,3 @@ +cloudfront_distribution_info + +cloud/aws
\ No newline at end of file diff --git a/ansible_collections/community/aws/tests/integration/targets/cloudfront_invalidation/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/cloudfront_invalidation/defaults/main.yml new file mode 100644 index 000000000..9e7265251 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/cloudfront_invalidation/defaults/main.yml @@ -0,0 +1,2 @@ +--- +cloudfront_hostname: "{{ resource_prefix }}01" diff --git a/ansible_collections/community/aws/tests/integration/targets/cloudfront_invalidation/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/cloudfront_invalidation/tasks/main.yml new file mode 100644 index 000000000..b42c8915c --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/cloudfront_invalidation/tasks/main.yml @@ -0,0 +1,85 @@ +- module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + + collections: + - amazon.aws + + block: + - name: create cloudfront distribution using defaults + cloudfront_distribution: + origins: + - domain_name: "{{ cloudfront_hostname }}-origin.example.com" + id: "{{ cloudfront_hostname }}-origin.example.com" + default_cache_behavior: + target_origin_id: "{{ cloudfront_hostname }}-origin.example.com" + state: present + register: _distribution + + - set_fact: + distribution_id: '{{ _distribution.id }}' + caller_reference: '{{ _distribution.caller_reference }}' + + - name: create cloudfront invalidation + cloudfront_invalidation: + distribution_id: '{{ distribution_id }}' + target_paths: + - '/path/invalidation' + + - name: get cloudfront invalidation + cloudfront_distribution_info: + distribution_id: '{{ distribution_id }}' + list_invalidations: true + register: distribution_info + + - name: Ensure cloudfront distribution has 1 invalidation + assert: + that: + - distribution_info.cloudfront.invalidations | length == 1 + + - name: create cloudfront invalidation with caller reference + cloudfront_invalidation: + distribution_id: '{{ distribution_id }}' + target_paths: + - '/invalidation/*' + caller_reference: '{{ caller_reference }}' + register: _invalidation + + - name: Ensure invalidation was created with expected caller reference + assert: + that: + - _invalidation.invalidation.invalidation_batch.caller_reference == caller_reference + + - name: get cloudfront invalidation + cloudfront_distribution_info: + distribution_id: '{{ distribution_id }}' + list_invalidations: true + register: distribution_info + + - name: Ensure cloudfront distribution has 2 invalidations + assert: + that: + - distribution_info.cloudfront.invalidations | length == 2 + + - name: get cloudfront invalidation + cloudfront_distribution_info: + distribution_id: '{{ distribution_id }}' + invalidation_id: '{{ _invalidation.invalidation.id }}' + invalidation: true + register: invalidation_info + + - name: Ensure invalidation info was retrieved + assert: + that: + - _invalidation.invalidation.id in invalidation_info.cloudfront + + always: + - name: clean up cloudfront distribution + cloudfront_distribution: + distribution_id: "{{ _distribution.id }}" + enabled: false + wait: false + state: absent + ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/cloudfront_origin_access_identity/aliases b/ansible_collections/community/aws/tests/integration/targets/cloudfront_origin_access_identity/aliases new file mode 100644 
index 000000000..c282df0b0 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/cloudfront_origin_access_identity/aliases @@ -0,0 +1,3 @@ +cloudfront_distribution_info + +cloud/aws
\ No newline at end of file diff --git a/ansible_collections/community/aws/tests/integration/targets/cloudfront_origin_access_identity/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/cloudfront_origin_access_identity/defaults/main.yml new file mode 100644 index 000000000..9e7265251 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/cloudfront_origin_access_identity/defaults/main.yml @@ -0,0 +1,2 @@ +--- +cloudfront_hostname: "{{ resource_prefix }}01" diff --git a/ansible_collections/community/aws/tests/integration/targets/cloudfront_origin_access_identity/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/cloudfront_origin_access_identity/tasks/main.yml new file mode 100644 index 000000000..9259108bc --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/cloudfront_origin_access_identity/tasks/main.yml @@ -0,0 +1,153 @@ +- module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + + collections: + - amazon.aws + + block: + - name: create cloudfront distribution using defaults + cloudfront_distribution: + origins: + - domain_name: "{{ cloudfront_hostname }}-origin.example.com" + id: "{{ cloudfront_hostname }}-origin.example.com" + default_cache_behavior: + target_origin_id: "{{ cloudfront_hostname }}-origin.example.com" + state: present + register: _distribution + + - set_fact: + distribution_id: '{{ _distribution.id }}' + caller_reference: '{{ _distribution.caller_reference }}' + + - name: create cloudfront distribution origin access identity + cloudfront_origin_access_identity: + state: present + comment: "this is a sample origin access identity" + register: _origin_access_id + + - name: get cloudfront distribution origin access + cloudfront_distribution_info: + distribution_id: '{{ distribution_id }}' + list_origin_access_identities: true + register: distribution_info + + - name: Ensure cloudfront distribution origin access identity exists + assert: + that: + - oid in origin_access_ids + vars: + origin_access_ids: '{{ distribution_info.cloudfront.origin_access_identities | map(attribute="Id") | list }}' + oid: '{{ _origin_access_id.cloud_front_origin_access_identity.id }}' + + - name: Update cloudfront origin access identity + cloudfront_origin_access_identity: + state: present + comment: "this origin access identity comment has been updated" + origin_access_identity_id: '{{ _origin_access_id.cloud_front_origin_access_identity.id }}' + register: _updated_origin_access_id + + - name: Ensure cloudfront origin access was updated + assert: + that: + - _updated_origin_access_id is changed + - orig_access_config.comment == "this origin access identity comment has been updated" + vars: + orig_access_config: '{{ _updated_origin_access_id.cloud_front_origin_access_identity.cloud_front_origin_access_identity_config }}' + + - name: Update cloudfront origin access identity once again + cloudfront_origin_access_identity: + state: present + comment: "this origin access identity comment has been updated" + origin_access_identity_id: '{{ _origin_access_id.cloud_front_origin_access_identity.id }}' + register: _update_idempotency + + - name: Ensure idempotency did not report change + assert: + that: + - _update_idempotency is not changed + + - name: create another cloudfront distribution origin access identity with caller reference + cloudfront_origin_access_identity: + state: present + 
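# caller_reference makes creation idempotent on the CloudFront side: repeating
# an identical request with the same reference returns the existing identity
# rather than creating a second one.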
comment: "this is another origin access identity" + caller_reference: '{{ caller_reference }}' + register: _another_origin_access_id + + - name: Ensure invalidation was created with expected caller reference + assert: + that: + - _another_origin_access_id.cloud_front_origin_access_identity.cloud_front_origin_access_identity_config.caller_reference == caller_reference + + - name: get cloudfront origin access identities + cloudfront_distribution_info: + distribution_id: '{{ distribution_id }}' + list_origin_access_identities: true + register: distribution_info + + - name: Ensure cloudfront distribution origin access identity exists + assert: + that: + - first_oid in origin_access_ids + - another_oid in origin_access_ids + vars: + origin_access_ids: '{{ distribution_info.cloudfront.origin_access_identities | map(attribute="Id") | list }}' + first_oid: '{{ _origin_access_id.cloud_front_origin_access_identity.id }}' + another_oid: '{{ _another_origin_access_id.cloud_front_origin_access_identity.id }}' + + - name: get cloudfront origin access + cloudfront_distribution_info: + distribution_id: '{{ distribution_id }}' + origin_access_identity_id: '{{ _another_origin_access_id.cloud_front_origin_access_identity.id }}' + origin_access_identity: true + register: invalidation_info + + - name: Ensure invalidation info was retrieved + assert: + that: + - _another_origin_access_id.cloud_front_origin_access_identity.id in invalidation_info.cloudfront + + - name: Delete cloudfront origin access + cloudfront_origin_access_identity: + state: absent + origin_access_identity_id: '{{ _another_origin_access_id.cloud_front_origin_access_identity.id }}' + register: _delete_origin_access + + - name: Ensure origin access identity was deleted + assert: + that: + - _delete_origin_access is changed + + - name: list cloudfront origin access identities + cloudfront_distribution_info: + list_origin_access_identities: true + register: origin_access_identities + + - name: Ensure deleted origin access identity is not part of the list + assert: + that: + - _another_origin_access_id.cloud_front_origin_access_identity.id not in origin_access_ids + vars: + origin_access_ids: '{{ origin_access_identities.cloudfront.origin_access_identities | map(attribute="Id") | list}}' + + - name: Delete cloudfront origin access once again + cloudfront_origin_access_identity: + state: absent + origin_access_identity_id: '{{ _another_origin_access_id.cloud_front_origin_access_identity.id }}' + register: _delete_origin_access + + - name: Ensure origin access identity was deleted + assert: + that: + - _delete_origin_access is not changed + + always: + - name: clean up cloudfront distribution + cloudfront_distribution: + distribution_id: "{{ _distribution.id }}" + enabled: false + wait: false + state: absent + ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/cloudfront_reponse_headers_policy/task/main.yml b/ansible_collections/community/aws/tests/integration/targets/cloudfront_reponse_headers_policy/task/main.yml index ee30f5ab5..5bab44f9f 100644 --- a/ansible_collections/community/aws/tests/integration/targets/cloudfront_reponse_headers_policy/task/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/cloudfront_reponse_headers_policy/task/main.yml @@ -3,9 +3,9 @@ - name: Integration testing for the cloudfront_response_headers_policy module module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ 
security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: @@ -24,7 +24,7 @@ that: - create_result is changed - create_result is not failed - - create_result.response_headers_policy.response_headers_policy_config.name == "{{ resource_prefix }}-my-header-policy" + - create_result.response_headers_policy.response_headers_policy_config.name == resource_prefix ~ '-my-header-policy' - name: Rerun same task to ensure idempotence cloudfront_response_headers_policy: diff --git a/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/description.yml b/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/description.yml index 13c12b5b6..e52c4326f 100644 --- a/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/description.yml +++ b/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/description.yml @@ -4,14 +4,14 @@ description_two: 'Another_Description - {{ resource_prefix }}' # Mandatory settings module_defaults: - community.aws.aws_codebuild: + community.aws.codebuild_project: name: '{{ project_name }}' # community.aws.aws_codebuild_info: # name: '{{ project_name }}' block: # - name: test setting description aws_codebuild (check mode) -# aws_codebuild: +# codebuild_project: # description: '{{ description_one }}' # register: update_result # check_mode: yes @@ -21,7 +21,7 @@ # - update_result is changed - name: test setting description aws_codebuild - aws_codebuild: + codebuild_project: description: '{{ description_one }}' register: update_result - name: assert that update succeeded @@ -31,7 +31,7 @@ - update_result.project.description == description_one # - name: test setting description aws_codebuild - idempotency (check mode) -# aws_codebuild: +# codebuild_project: # description: '{{ description_one }}' # register: update_result # check_mode: yes @@ -41,7 +41,7 @@ # - update_result is not changed - name: test setting description aws_codebuild - idempotency - aws_codebuild: + codebuild_project: description: '{{ description_one }}' register: update_result - name: assert that update succeeded @@ -53,7 +53,7 @@ ### # - name: test updating description on aws_codebuild (check mode) -# aws_codebuild: +# codebuild_project: # description: '{{ description_two }}' # register: update_result # check_mode: yes @@ -63,7 +63,7 @@ # - update_result is changed - name: test updating description on aws_codebuild - aws_codebuild: + codebuild_project: description: '{{ description_two }}' register: update_result - name: assert that update succeeded @@ -73,7 +73,7 @@ - update_result.project.description == description_two # - name: test updating description on aws_codebuild - idempotency (check mode) -# aws_codebuild: +# codebuild_project: # description: '{{ description_two }}' # register: update_result # check_mode: yes @@ -83,7 +83,7 @@ # - update_result is not changed - name: test updating description on aws_codebuild - idempotency - aws_codebuild: + codebuild_project: description: '{{ description_two }}' register: update_result - name: assert that update succeeded @@ -105,7 +105,7 @@ # ### # - name: test no description param aws_codebuild (check mode) -# aws_codebuild: {} +# codebuild_project: {} # register: update_result # check_mode: yes # - name: assert no change @@ -116,7 +116,7 @@ - name: test no description param aws_codebuild - aws_codebuild: {} + 
codebuild_project: {} register: update_result - name: assert no change assert: diff --git a/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/main.yml index f674aba24..3f8a22fd7 100644 --- a/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/main.yml @@ -4,9 +4,9 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: @@ -27,7 +27,7 @@ # ================== integration test ========================================== - name: create CodeBuild project - aws_codebuild: + codebuild_project: name: "{{ project_name }}" description: Build project for testing the Ansible aws_codebuild module service_role: "{{ codebuild_iam_role.iam_role.arn }}" @@ -48,7 +48,7 @@ environment_variables: - { name: 'FOO_ENV', value: 'other' } tags: - - { key: 'purpose', value: 'ansible-test' } + purpose: 'ansible-test' state: present register: output retries: 10 @@ -61,7 +61,7 @@ - output.project.resource_tags.purpose == "ansible-test" - name: idempotence check rerunning same Codebuild task - aws_codebuild: + codebuild_project: name: "{{ project_name }}" description: Build project for testing the Ansible aws_codebuild module service_role: "{{ codebuild_iam_role.iam_role.arn }}" @@ -83,7 +83,7 @@ environment_variables: - { name: 'FOO_ENV', value: 'other' } tags: - - { key: 'purpose', value: 'ansible-test' } + purpose: 'ansible-test' state: present register: rerun_test_output @@ -96,7 +96,7 @@ - include_tasks: 'description.yml' - name: delete CodeBuild project - aws_codebuild: + codebuild_project: name: "{{ output.project.name }}" source: type: CODEPIPELINE diff --git a/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/tagging.yml b/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/tagging.yml index a26f2a337..2e31df2d8 100644 --- a/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/tagging.yml +++ b/ansible_collections/community/aws/tests/integration/targets/codebuild_project/tasks/tagging.yml @@ -27,7 +27,7 @@ new_snake_case_key: snake_case_value # Mandatory settings module_defaults: - community.aws.aws_codebuild: + community.aws.codebuild_project: name: '{{ project_name }}' # community.aws.aws_codebuild_info: # name: '{{ project_name }}' @@ -36,7 +36,7 @@ ### # - name: test adding tags to aws_codebuild (check mode) -# aws_codebuild: +# codebuild_project: # resource_tags: '{{ first_tags }}' # purge_tags: True # register: update_result @@ -47,7 +47,7 @@ # - update_result is changed - name: test adding tags to aws_codebuild - aws_codebuild: + codebuild_project: resource_tags: '{{ first_tags }}' purge_tags: True register: update_result @@ -58,7 +58,7 @@ - update_result.project.resource_tags == first_tags # - name: test adding tags to aws_codebuild - idempotency (check mode) -# aws_codebuild: +# codebuild_project: # resource_tags: '{{ first_tags }}' # purge_tags: True # register: update_result @@ -69,7 +69,7 @@ # - update_result is not changed - name: test adding tags to 
aws_codebuild - idempotency - aws_codebuild: + codebuild_project: resource_tags: '{{ first_tags }}' purge_tags: True register: update_result @@ -82,7 +82,7 @@ ### # - name: test updating tags with purge on aws_codebuild (check mode) -# aws_codebuild: +# codebuild_project: # resource_tags: '{{ second_tags }}' # purge_tags: True # register: update_result @@ -93,7 +93,7 @@ # - update_result is changed - name: test updating tags with purge on aws_codebuild - aws_codebuild: + codebuild_project: resource_tags: '{{ second_tags }}' purge_tags: True register: update_result @@ -104,7 +104,7 @@ - update_result.project.resource_tags == second_tags # - name: test updating tags with purge on aws_codebuild - idempotency (check mode) -# aws_codebuild: +# codebuild_project: # resource_tags: '{{ second_tags }}' # purge_tags: True # register: update_result @@ -115,7 +115,7 @@ # - update_result is not changed - name: test updating tags with purge on aws_codebuild - idempotency - aws_codebuild: + codebuild_project: resource_tags: '{{ second_tags }}' purge_tags: True register: update_result @@ -128,7 +128,7 @@ ### # - name: test updating tags without purge on aws_codebuild (check mode) -# aws_codebuild: +# codebuild_project: # resource_tags: '{{ third_tags }}' # purge_tags: False # register: update_result @@ -139,7 +139,7 @@ # - update_result is changed - name: test updating tags without purge on aws_codebuild - aws_codebuild: + codebuild_project: resource_tags: '{{ third_tags }}' purge_tags: False register: update_result @@ -150,7 +150,7 @@ - update_result.project.resource_tags == final_tags # - name: test updating tags without purge on aws_codebuild - idempotency (check mode) -# aws_codebuild: +# codebuild_project: # resource_tags: '{{ third_tags }}' # purge_tags: False # register: update_result @@ -161,7 +161,7 @@ # - update_result is not changed - name: test updating tags without purge on aws_codebuild - idempotency - aws_codebuild: + codebuild_project: resource_tags: '{{ third_tags }}' purge_tags: False register: update_result @@ -184,7 +184,7 @@ # ### # - name: test no tags param aws_codebuild (check mode) -# aws_codebuild: {} +# codebuild_project: {} # register: update_result # check_mode: yes # - name: assert no change @@ -195,7 +195,7 @@ # - name: test no tags param aws_codebuild - aws_codebuild: {} + codebuild_project: {} register: update_result - name: assert no change assert: @@ -206,7 +206,7 @@ ### # - name: test removing tags from aws_codebuild (check mode) -# aws_codebuild: +# codebuild_project: # resource_tags: {} # purge_tags: True # register: update_result @@ -217,7 +217,7 @@ # - update_result is changed - name: test removing tags from aws_codebuild - aws_codebuild: + codebuild_project: resource_tags: {} purge_tags: True register: update_result @@ -228,7 +228,7 @@ - update_result.project.resource_tags == {} # - name: test removing tags from aws_codebuild - idempotency (check mode) -# aws_codebuild: +# codebuild_project: # resource_tags: {} # purge_tags: True # register: update_result @@ -239,7 +239,7 @@ # - update_result is not changed - name: test removing tags from aws_codebuild - idempotency - aws_codebuild: + codebuild_project: resource_tags: {} purge_tags: True register: update_result diff --git a/ansible_collections/community/aws/tests/integration/targets/codecommit_repository/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/codecommit_repository/tasks/main.yml index acf194e1e..62dd1653b 100644 --- 
a/ansible_collections/community/aws/tests/integration/targets/codecommit_repository/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/codecommit_repository/tasks/main.yml @@ -1,14 +1,14 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: # ============================================================ - name: Create a repository (CHECK MODE) - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" description: original comment state: present @@ -19,7 +19,7 @@ - output is changed - name: Create a repository - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" description: original comment state: present @@ -27,11 +27,11 @@ - assert: that: - output is changed - - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo' + - output.repository_metadata.repository_name == resource_prefix ~ '_repo' - output.repository_metadata.repository_description == 'original comment' - name: No-op update to repository - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" description: original comment state: present @@ -39,11 +39,11 @@ - assert: that: - output is not changed - - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo' + - output.repository_metadata.repository_name == resource_prefix ~ '_repo' - output.repository_metadata.repository_description == 'original comment' - name: Update repository description (CHECK MODE) - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" description: new comment state: present @@ -52,11 +52,11 @@ - assert: that: - output is changed - - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo' + - output.repository_metadata.repository_name == resource_prefix ~ '_repo' - output.repository_metadata.repository_description == 'original comment' - name: Update repository description - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" description: new comment state: present @@ -64,12 +64,12 @@ - assert: that: - output is changed - - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo' + - output.repository_metadata.repository_name == resource_prefix ~ '_repo' - output.repository_metadata.repository_description == 'new comment' # ============================================================ - name: Delete a repository (CHECK MODE) - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" state: absent register: output @@ -79,7 +79,7 @@ - output is changed - name: Delete a repository - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" state: absent register: output @@ -88,7 +88,7 @@ - output is changed - name: Delete a non-existent repository - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" state: absent register: output @@ -97,27 +97,27 @@ - output is not changed - name: Create a repository without description - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" state: present register: output - assert: that: - output is changed - - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo' + - output.repository_metadata.repository_name == resource_prefix 
~ '_repo' - name: No-op update to repository without description - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" state: present register: output - assert: that: - output is not changed - - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo' + - output.repository_metadata.repository_name == resource_prefix ~ '_repo' - name: Delete a repository without description - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" state: absent register: output @@ -128,7 +128,7 @@ always: ###### TEARDOWN STARTS HERE ###### - name: Delete a repository - aws_codecommit: + codecommit_repository: name: "{{ resource_prefix }}_repo" state: absent ignore_errors: yes diff --git a/ansible_collections/community/aws/tests/integration/targets/codepipeline/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/codepipeline/tasks/main.yml index 2e8e7d8f3..57353ed8a 100644 --- a/ansible_collections/community/aws/tests/integration/targets/codepipeline/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/codepipeline/tasks/main.yml @@ -4,9 +4,9 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: @@ -23,7 +23,7 @@ # ================== integration test ========================================== - name: create CodePipeline - aws_codepipeline: + codepipeline: name: "{{ codepipeline_name }}" role_arn: "{{ codepipeline_iam_role.iam_role.arn }}" artifact_store: @@ -66,11 +66,11 @@ - assert: that: - output.changed == True - - output.pipeline.name == "{{ codepipeline_name }}" + - output.pipeline.name == codepipeline_name - output.pipeline.stages|length > 1 - name: idempotence check rerunning same CodePipeline task - aws_codepipeline: + codepipeline: name: "{{ codepipeline_name }}" role_arn: "{{ codepipeline_iam_role.iam_role.arn }}" artifact_store: @@ -113,7 +113,7 @@ - rerun_test_output.pipeline == output.pipeline - name: Test deletion of CodePipeline - aws_codepipeline: + codepipeline: name: "{{ codepipeline_name }}" role_arn: '' artifact_store: {} @@ -131,7 +131,7 @@ always: - name: Cleanup - delete test CodePipeline - aws_codepipeline: + codepipeline: name: "{{ codepipeline_name }}" role_arn: '' artifact_store: {} diff --git a/ansible_collections/community/aws/tests/integration/targets/config/defaults/main.yaml b/ansible_collections/community/aws/tests/integration/targets/config/defaults/main.yaml index 26b39c583..3beeca841 100644 --- a/ansible_collections/community/aws/tests/integration/targets/config/defaults/main.yaml +++ b/ansible_collections/community/aws/tests/integration/targets/config/defaults/main.yaml @@ -1,4 +1,5 @@ --- config_s3_bucket: '{{ resource_prefix }}-config-records' +config_kms_key: '{{ resource_prefix }}-kms' config_sns_name: '{{ resource_prefix }}-delivery-channel-test-topic' config_role_name: 'ansible-test-{{ resource_prefix }}' diff --git a/ansible_collections/community/aws/tests/integration/targets/config/tasks/main.yaml b/ansible_collections/community/aws/tests/integration/targets/config/tasks/main.yaml index 313f9f677..244c4b29b 100644 --- a/ansible_collections/community/aws/tests/integration/targets/config/tasks/main.yaml +++ 
b/ansible_collections/community/aws/tests/integration/targets/config/tasks/main.yaml @@ -4,15 +4,22 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: # ============================================================ # Prerequisites # ============================================================ + - name: get ARN of calling user + aws_caller_info: + register: aws_caller_info + + - name: Store Account ID for later use + set_fact: + aws_account_id: "{{ aws_caller_info.account }}" - name: ensure IAM role exists iam_role: @@ -21,7 +28,7 @@ state: present create_instance_profile: no managed_policy: - - 'arn:aws:iam::aws:policy/service-role/AWSConfigRole' + - arn:aws:iam::aws:policy/service-role/AWS_ConfigRole register: config_iam_role - name: ensure SNS topic exists @@ -37,6 +44,12 @@ s3_bucket: name: "{{ config_s3_bucket }}" + - name: ensure KMS key exists + kms_key: + alias: "{{ config_kms_key }}" + policy: "{{ lookup('template', 'config-kms-policy.json.j2') }}" + register: kms_key + - name: ensure S3 access for IAM role iam_policy: iam_type: role @@ -49,7 +62,7 @@ # Module requirement testing # ============================================================ - name: test rule with no source parameter - aws_config_rule: + config_rule: name: random_name state: present register: output @@ -62,7 +75,7 @@ - 'output.msg.startswith("missing required arguments:")' - name: test resource_type delivery_channel with no s3_bucket parameter - aws_config_delivery_channel: + config_delivery_channel: name: random_name state: present register: output @@ -75,7 +88,7 @@ - 'output.msg.startswith("missing required arguments:")' - name: test resource_type configuration_recorder with no role_arn parameter - aws_config_recorder: + config_recorder: name: random_name state: present register: output @@ -88,7 +101,7 @@ - 'output.msg.startswith("state is present but all of the following are missing")' - name: test resource_type configuration_recorder with no recording_group parameter - aws_config_recorder: + config_recorder: name: random_name state: present role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder' @@ -102,7 +115,7 @@ - 'output.msg.startswith("state is present but all of the following are missing")' - name: test resource_type aggregation_authorization with no authorized_account_id parameter - aws_config_aggregation_authorization: + config_aggregation_authorization: state: present register: output ignore_errors: true @@ -114,7 +127,7 @@ - 'output.msg.startswith("missing required arguments:")' - name: test resource_type aggregation_authorization with no authorized_aws_region parameter - aws_config_aggregation_authorization: + config_aggregation_authorization: state: present authorized_account_id: '123456789012' register: output @@ -127,7 +140,7 @@ - 'output.msg.startswith("missing required arguments:")' - name: test resource_type configuration_aggregator with no account_sources parameter - aws_config_aggregator: + config_aggregator: name: random_name state: present register: output @@ -140,7 +153,7 @@ - 'output.msg.startswith("missing required arguments: account_sources")' - name: test resource_type configuration_aggregator with no organization_source parameter - aws_config_aggregator: + config_aggregator: name: 
random_name state: present account_sources: [] @@ -157,7 +170,7 @@ # Creation testing # ============================================================ - name: Create Configuration Recorder for AWS Config - aws_config_recorder: + config_recorder: name: '{{ resource_prefix }}-recorder' state: present role_arn: "{{ config_iam_role.arn }}" @@ -171,11 +184,26 @@ - output.changed - name: Create Delivery Channel for AWS Config - aws_config_delivery_channel: + config_delivery_channel: + name: '{{ resource_prefix }}-channel' + state: present + s3_bucket: "{{ config_s3_bucket }}" + s3_prefix: "foo/bar" + sns_topic_arn: "{{ config_sns_topic.sns_arn }}" + delivery_frequency: 'Twelve_Hours' + register: output + + - assert: + that: + - output.changed + + - name: Create Delivery Channel for AWS Config with a KMS key + config_delivery_channel: name: '{{ resource_prefix }}-channel' state: present s3_bucket: "{{ config_s3_bucket }}" s3_prefix: "foo/bar" + kms_key_arn: "{{ kms_key.key_arn }}" sns_topic_arn: "{{ config_sns_topic.sns_arn }}" delivery_frequency: 'Twelve_Hours' register: output @@ -185,7 +213,7 @@ - output.changed - name: Create Config Rule for AWS Config - aws_config_rule: + config_rule: name: '{{ resource_prefix }}-rule' state: present description: 'This AWS Config rule checks for public write access on S3 buckets' @@ -202,7 +230,7 @@ - output.changed - name: Create aws_config_aggregator - aws_config_aggregator: + config_aggregator: name: random_name state: present account_sources: [] @@ -217,7 +245,7 @@ - output is changed - name: Create aws_config_aggregator - idempotency - aws_config_aggregator: + config_aggregator: name: random_name state: present account_sources: [] @@ -235,7 +263,7 @@ # Update testing # ============================================================ - name: Update Configuration Recorder - aws_config_recorder: + config_recorder: name: '{{ resource_prefix }}-recorder' state: present role_arn: "{{ config_iam_role.arn }}" @@ -251,7 +279,7 @@ - output.changed - name: Update Delivery Channel - aws_config_delivery_channel: + config_delivery_channel: name: '{{ resource_prefix }}-channel' state: present s3_bucket: "{{ config_s3_bucket }}" @@ -263,8 +291,22 @@ that: - output.changed + - name: Update Delivery Channel with KMS key + config_delivery_channel: + name: '{{ resource_prefix }}-channel' + state: present + s3_bucket: "{{ config_s3_bucket }}" + sns_topic_arn: "{{ config_sns_topic.sns_arn }}" + kms_key_arn: "{{ kms_key.key_arn }}" + delivery_frequency: 'TwentyFour_Hours' + register: output + + - assert: + that: + - output.changed + - name: Update Config Rule - aws_config_rule: + config_rule: name: '{{ resource_prefix }}-rule' state: present description: 'This AWS Config rule checks for public write access on S3 buckets' @@ -281,7 +323,7 @@ - output.changed - name: Update Config Rule - idempotency - aws_config_rule: + config_rule: name: '{{ resource_prefix }}-rule' state: present description: 'This AWS Config rule checks for public write access on S3 buckets' @@ -298,7 +340,7 @@ - output is not changed - name: Update aws_config_aggregator - aws_config_aggregator: + config_aggregator: name: random_name state: present account_sources: [] @@ -315,7 +357,7 @@ - output is changed - name: Update aws_config_aggregator - idempotency - aws_config_aggregator: + config_aggregator: name: random_name state: present account_sources: [] @@ -335,7 +377,7 @@ # Read testing # ============================================================ - name: Don't update Configuration Recorder - 
aws_config_recorder: + config_recorder: name: '{{ resource_prefix }}-recorder' state: present role_arn: "{{ config_iam_role.arn }}" @@ -351,7 +393,7 @@ - not output.changed - name: Don't update Delivery Channel - aws_config_delivery_channel: + config_delivery_channel: name: '{{ resource_prefix }}-channel' state: present s3_bucket: "{{ config_s3_bucket }}" @@ -364,7 +406,7 @@ - not output.changed - name: Don't update Config Rule - aws_config_rule: + config_rule: name: '{{ resource_prefix }}-rule' state: present description: 'This AWS Config rule checks for public write access on S3 buckets' @@ -383,7 +425,7 @@ always: - name: delete aws_config_aggregator - aws_config_aggregator: + config_aggregator: name: random_name state: absent register: output @@ -393,32 +435,32 @@ # Destroy testing # ============================================================ - name: Destroy Configuration Recorder - aws_config_recorder: + config_recorder: name: '{{ resource_prefix }}-recorder' state: absent register: output - ignore_errors: yes + ignore_errors: true # - assert: # that: # - output.changed - name: Destroy Delivery Channel - aws_config_delivery_channel: + config_delivery_channel: name: '{{ resource_prefix }}-channel' state: absent s3_bucket: "{{ config_s3_bucket }}" sns_topic_arn: "{{ config_sns_topic.sns_arn }}" delivery_frequency: 'TwentyFour_Hours' register: output - ignore_errors: yes + ignore_errors: true # - assert: # that: # - output.changed - name: Destroy Config Rule - aws_config_rule: + config_rule: name: '{{ resource_prefix }}-rule' state: absent description: 'This AWS Config rule checks for public write access on S3 buckets' @@ -429,7 +471,7 @@ owner: AWS identifier: 'S3_BUCKET_PUBLIC_READ_PROHIBITED' register: output - ignore_errors: yes + ignore_errors: true # - assert: # that: @@ -445,23 +487,29 @@ policy_name: AwsConfigRecorderTestRoleS3Policy state: absent policy_json: "{{ lookup( 'template', 'config-s3-policy.json.j2') }}" - ignore_errors: yes + ignore_errors: true - name: remove IAM role iam_role: name: '{{ config_role_name }}' state: absent - ignore_errors: yes + ignore_errors: true - name: remove SNS topic sns_topic: name: '{{ config_sns_name }}' state: absent - ignore_errors: yes + ignore_errors: true - name: remove S3 bucket s3_bucket: name: "{{ config_s3_bucket }}" state: absent - force: yes - ignore_errors: yes + force: true + ignore_errors: true + + - name: remove KMS key + kms_key: + alias: "{{ config_kms_key }}" + state: absent + ignore_errors: true
diff --git a/ansible_collections/community/aws/tests/integration/targets/config/templates/config-kms-policy.json.j2 b/ansible_collections/community/aws/tests/integration/targets/config/templates/config-kms-policy.json.j2 new file mode 100644 index 000000000..260adc839 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/config/templates/config-kms-policy.json.j2 @@ -0,0 +1,51 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Enable IAM User Permissions", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ aws_account_id }}:root" + }, + "Action": "kms:*", + "Resource": "*" + }, + { + "Sid": "Allow use of the key", + "Effect": "Allow", + "Principal": { + "AWS": [ + "arn:aws:iam::{{ aws_account_id }}:role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig" + ] + }, + "Action": [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ], + "Resource": "*" + }, + { + "Sid": "Allow attachment of persistent resources", + "Effect": "Allow", + "Principal": { + "AWS": [ + "arn:aws:iam::{{ aws_account_id }}:role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig" + ] + }, + "Action": [ + "kms:CreateGrant", + "kms:ListGrants", + "kms:RevokeGrant" + ], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } + } + ] +}
\ No newline at end of file
diff --git a/ansible_collections/community/aws/tests/integration/targets/connection/test_assume.yml b/ansible_collections/community/aws/tests/integration/targets/connection/test_assume.yml new file mode 100644 index 000000000..f979ef2d4 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/connection/test_assume.yml @@ -0,0 +1,16 @@ +- name: 'Ensure remote user exists' + ansible.builtin.user: + name: '{{ user_name }}' + shell: /bin/bash + become_user: 'root' + become: True + +- name: 'Attempt to run a shell command as the user ({{ user_name }})' + become_user: '{{ user_name }}' + become: True + command: 'id -u -n' + register: id_cmd + +- assert: + that: + - id_cmd.stdout == user_name
diff --git a/ansible_collections/community/aws/tests/integration/targets/connection/test_connection.yml b/ansible_collections/community/aws/tests/integration/targets/connection/test_connection.yml index 829ac93b3..b8bdc43f4 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection/test_connection.yml +++ b/ansible_collections/community/aws/tests/integration/targets/connection/test_connection.yml @@ -10,9 +10,12 @@ tasks: ### test wait_for_connection plugin + - wait_for_connection: timeout: '{{ wait_for_timeout | default(100) }}' + ### Try to gather the default facts from the host + - name: Gather facts ansible.builtin.setup: @@ -52,6 +55,30 @@ - name: remove remote temp file action: "{{ action_prefix }}file path={{ remote_file }} state=absent" + ### Test that we're the user we expect to be and can change where appropriate + # Regression - https://github.com/ansible-collections/community.aws/issues/853 + + - name: Test user manipulation + when: + - '"aws_ssm_linux" in group_names' + block: + - name: 'Find ID when become=False' + become: False + command: 'id -u -n' + register: id_cmd + + - assert: + that: + - id_cmd.stdout == 'ssm-user' + + - include_tasks: 'test_assume.yml' + loop: + - ssm-agent + - zuul + - root + loop_control: + loop_var: user_name + ### copy an empty file - name: copy an empty file action: "{{ action_prefix }}copy content= dest={{ remote_empty_file }}" @@ -62,4 +89,4 @@ assert: that: - stat_empty_file_cmd.stat.isreg # it is a regular file - - stat_empty_file_cmd.stat.size == 0 + - stat_empty_file_cmd.stat.size == 0
\ No newline at end of file diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml index db519fb63..9e2f3fd01 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml @@ -2,7 +2,7 @@ roles: - role: ../setup_connection_aws_ssm vars: - target_os: fedora + target_os: centos encrypted_bucket: False s3_bucket_region: 'eu-central-1' s3_addressing_style: virtual diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_fedora/aliases b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_centos/aliases index eb8e0b891..eb8e0b891 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_fedora/aliases +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_centos/aliases diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_fedora/aws_ssm_integration_test_setup.yml b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_centos/aws_ssm_integration_test_setup.yml index 353757e33..d64cdabb6 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_fedora/aws_ssm_integration_test_setup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_centos/aws_ssm_integration_test_setup.yml @@ -2,4 +2,4 @@ roles: - role: ../setup_connection_aws_ssm vars: - target_os: fedora + target_os: centos diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_fedora/aws_ssm_integration_test_teardown.yml b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_centos/aws_ssm_integration_test_teardown.yml index 3ab6f74cf..3ab6f74cf 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_fedora/aws_ssm_integration_test_teardown.yml +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_centos/aws_ssm_integration_test_teardown.yml diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_fedora/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_centos/meta/main.yml index d055eb86e..d055eb86e 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_fedora/meta/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_centos/meta/main.yml diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_fedora/runme.sh b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_centos/runme.sh index c99b3b066..c99b3b066 100755 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_fedora/runme.sh +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_centos/runme.sh diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_setup.yml 
b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_setup.yml index 1f223757c..eff5f5386 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_setup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_setup.yml @@ -2,7 +2,7 @@ roles: - role: ../setup_connection_aws_ssm vars: - target_os: fedora + target_os: centos s3_bucket_region: 'eu-central-1' # Post 2019 regions behave differently from other regions # they're worth testing but it's not possible in CI today. diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml index bfea0d0dc..d6e650cd3 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml @@ -2,6 +2,6 @@ roles: - role: ../setup_connection_aws_ssm vars: - target_os: fedora + target_os: centos encrypted_bucket: True test_suffix: encrypteds3 diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_setup.yml b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_setup.yml index 71c850e9d..e0296c7d6 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_setup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_setup.yml @@ -2,6 +2,6 @@ roles: - role: ../setup_connection_aws_ssm vars: - target_os: fedora + target_os: centos test_suffix: endpoint endpoint_url: 'https://s3.dualstack.{{ aws_region }}.amazonaws.com' diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_profile/aws_ssm_integration_test_setup.yml b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_profile/aws_ssm_integration_test_setup.yml index 3f4c2e47d..b8169d2c6 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_profile/aws_ssm_integration_test_setup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_profile/aws_ssm_integration_test_setup.yml @@ -2,5 +2,5 @@ roles: - role: ../setup_connection_aws_ssm vars: - target_os: fedora + target_os: centos profile_name: test_profile diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml index 992426976..6ef4dfd47 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml @@ -2,6 +2,6 @@ roles: - role: ../setup_connection_aws_ssm vars: - target_os: fedora + target_os: centos use_ssm_document: True test_suffix: document 
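Each connection_aws_ssm_* target changed above is a thin wrapper playbook around the shared ../setup_connection_aws_ssm role: the common target_os var (switched from fedora to centos throughout this commit) plus one or two variant-specific vars such as encrypted_bucket, endpoint_url, profile_name, use_ssm_document, or credential_vars. A minimal sketch of such a wrapper for a hypothetical new variant, assuming a hosts line that is not visible in the hunks above; the example_suffix value is a placeholder, not part of the collection:

- hosts: localhost  # assumption: the play target is outside the hunk context shown
  roles:
    - role: ../setup_connection_aws_ssm
      vars:
        target_os: centos            # common to every variant after this commit
        encrypted_bucket: True       # variant-specific toggle, as in the encrypted_s3 target
        test_suffix: example_suffix  # placeholder suffix used to namespace the test resources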
diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_vars/aws_ssm_integration_test_setup.yml b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_vars/aws_ssm_integration_test_setup.yml index ff67bc2c3..2b3755b88 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_vars/aws_ssm_integration_test_setup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_vars/aws_ssm_integration_test_setup.yml @@ -2,5 +2,5 @@ roles: - role: ../setup_connection_aws_ssm vars: - target_os: fedora + target_os: centos credential_vars: True diff --git a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_windows/aliases b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_windows/aliases index eb8e0b891..b321dedb6 100644 --- a/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_windows/aliases +++ b/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm_windows/aliases @@ -2,3 +2,5 @@ time=10m cloud/aws connection_aws_ssm + +unstable diff --git a/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/tasks/main.yml index 328ea17a5..8d12933a4 100644 --- a/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/tasks/main.yml @@ -4,9 +4,9 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: diff --git a/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/tasks/main.yml index 0952602f1..712bc82be 100644 --- a/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/tasks/main.yml @@ -4,9 +4,9 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: diff --git a/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/aliases b/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/aliases index dc5eacd6f..17466b153 100644 --- a/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/aliases +++ b/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/aliases @@ -1,2 +1,4 @@ cloud/aws time=50m + +unstable diff --git a/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/defaults/main.yml index 8b92884a4..de11cefba 100644 --- a/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/defaults/main.yml 
+++ b/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/defaults/main.yml @@ -1,5 +1,7 @@ --- table_name: "{{ resource_prefix }}" +table_name_composite_pk: "{{ resource_prefix }}-composite-pk" +table_name_composite_pk_local_indexes: "{{ resource_prefix }}-composite-pk-local-indexes" table_name_on_demand: "{{ resource_prefix }}-pay-per-request" table_name_on_demand_complex: "{{ resource_prefix }}-pay-per-request-complex" @@ -31,6 +33,32 @@ indexes: read_capacity: 2 write_capacity: 2 +local_indexes: + - name: NamedIndex + type: include + hash_key_name: "id" ## == table_index + hash_key_type: "NUMBER" ## == table_index_type + range_key_name: create_time + includes: + - other_field + - other_field2 + read_capacity: 10 + write_capacity: 10 + - name: AnotherIndex + type: all + hash_key_name: id ## == table_index + hash_key_type: "NUMBER" ## == table_index_type + range_key_name: bar + read_capacity: 5 + write_capacity: 5 + - name: KeysOnlyIndex + type: keys_only + hash_key_name: id ## == table_index + hash_key_type: "NUMBER" ## == table_index_type + range_key_name: baz + read_capacity: 2 + write_capacity: 2 + indexes_pay_per_request: - name: NamedIndex type: global_include diff --git a/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/meta/main.yml index 504e72117..32cf5dda7 100644 --- a/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/meta/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/meta/main.yml @@ -1,4 +1 @@ -dependencies: - - role: setup_botocore_pip - vars: - botocore_version: "1.23.18" +dependencies: [] diff --git a/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/tasks/main.yml index b208f4ca5..268e61bae 100644 --- a/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/tasks/main.yml @@ -7,12 +7,12 @@ # - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - include: "test_pay_per_request.yml" + - include_tasks: "test_pay_per_request.yml" # ============================================== @@ -115,6 +115,262 @@ - create_table.write_capacity == 1 # ============================================== + # Attempting to create a table without PK range key but with local indexes will result in an expected failure. 
+ # "One or more parameter values were invalid: Table KeySchema does not have a range key, which is required when specifying a LocalSecondaryIndex" + + - name: Create table with simple PK with local indexes - test failure + dynamodb_table: + state: present + name: "{{ table_name_composite_pk }}" + hash_key_name: "{{ table_index }}" + hash_key_type: "{{ table_index_type }}" + indexes: "{{ local_indexes }}" + ignore_errors: yes + register: create_table + + - name: Check results - Create table with simple PK with local indexes + assert: + that: + - create_table is failed + + # ============================================== + # Attempting to create a table with composite PK but with local indexes using different hash key will result in an expected failure. + # "One or more parameter values were invalid: Index KeySchema does not have the same leading hash key as table KeySchema for index: NamedIndex. index hash key: id, table hash key: NOT_id" + + - name: Create table with composite PK with mismatching local indexes - test failure + dynamodb_table: + state: present + name: "{{ table_name_composite_pk }}" + hash_key_name: "NOT_{{ table_index }}" + hash_key_type: "{{ table_index_type }}" + range_key_name: "{{ range_index }}" + range_key_type: "{{ range_index_type }}" + indexes: "{{ local_indexes }}" + ignore_errors: yes + register: create_table + + - name: Check results - Create table with composite PK with mismatching local indexes + assert: + that: + - create_table is failed + + # ============================================== + + - name: Create table with composite PK - check_mode + dynamodb_table: + state: present + name: "{{ table_name_composite_pk }}" + hash_key_name: "{{ table_index }}" + hash_key_type: "{{ table_index_type }}" + range_key_name: "{{ range_index }}" + range_key_type: "{{ range_index_type }}" + register: create_table + check_mode: True + + - name: Check results - Create table with composite PK - check_mode + assert: + that: + - create_table is successful + - create_table is changed + + - name: Create table with composite PK + dynamodb_table: + state: present + name: "{{ table_name_composite_pk }}" + hash_key_name: "{{ table_index }}" + hash_key_type: "{{ table_index_type }}" + range_key_name: "{{ range_index }}" + range_key_type: "{{ range_index_type }}" + register: create_table + + - name: Check results - Create table with composite PK + assert: + that: + - create_table is successful + - create_table is changed + - '"hash_key_name" in create_table' + - '"hash_key_type" in create_table' + - '"indexes" in create_table' + - '"range_key_name" in create_table' + - '"range_key_type" in create_table' + - '"read_capacity" in create_table' + - '"region" in create_table' + - '"table_name" in create_table' + - '"table_status" in create_table' + - '"tags" in create_table' + - '"write_capacity" in create_table' + - create_table.hash_key_name == table_index + - create_table.hash_key_type == table_index_type + - create_table.range_key_name == range_index + - create_table.range_key_type == range_index_type + - create_table.indexes | length == 0 + - create_table.read_capacity == 1 + - create_table.table_name == table_name_composite_pk + - create_table.write_capacity == 1 + + - name: Create table with composite PK - idempotent - check_mode + dynamodb_table: + state: present + name: "{{ table_name_composite_pk }}" + hash_key_name: "{{ table_index }}" + hash_key_type: "{{ table_index_type }}" + range_key_name: "{{ range_index }}" + range_key_type: "{{ range_index_type }}" + register: 
create_table + check_mode: True + + - name: Check results - Create table with composite PK - idempotent - check_mode + assert: + that: + - create_table is successful + - create_table is not changed + + - name: Create table with composite PK - idempotent + dynamodb_table: + state: present + name: "{{ table_name_composite_pk }}" + hash_key_name: "{{ table_index }}" + hash_key_type: "{{ table_index_type }}" + range_key_name: "{{ range_index }}" + range_key_type: "{{ range_index_type }}" + register: create_table + + - name: Check results - Create table with composite PK - idempotent + assert: + that: + - create_table is successful + - create_table is not changed + - '"hash_key_name" in create_table' + - '"hash_key_type" in create_table' + - '"indexes" in create_table' + - '"range_key_name" in create_table' + - '"range_key_type" in create_table' + - '"read_capacity" in create_table' + - '"region" in create_table' + - '"table_name" in create_table' + - '"table_status" in create_table' + - '"tags" in create_table' + - '"write_capacity" in create_table' + - create_table.hash_key_name == table_index + - create_table.hash_key_type == table_index_type + - create_table.range_key_name == range_index + - create_table.range_key_type == range_index_type + - create_table.indexes | length == 0 + - create_table.read_capacity == 1 + - create_table.table_name == table_name_composite_pk + - create_table.write_capacity == 1 + + # ============================================== + + - name: Create table with composite PK and local indexes - check_mode + dynamodb_table: + state: present + name: "{{ table_name_composite_pk_local_indexes }}" + hash_key_name: "{{ table_index }}" + hash_key_type: "{{ table_index_type }}" + range_key_name: "{{ range_index }}" + range_key_type: "{{ range_index_type }}" + indexes: "{{ local_indexes }}" + register: create_table + check_mode: True + + - name: Check results - Create table with composite PK and local indexes - check_mode + assert: + that: + - create_table is successful + - create_table is changed + + - name: Create table with composite PK and local indexes + dynamodb_table: + state: present + name: "{{ table_name_composite_pk_local_indexes }}" + hash_key_name: "{{ table_index }}" + hash_key_type: "{{ table_index_type }}" + range_key_name: "{{ range_index }}" + range_key_type: "{{ range_index_type }}" + indexes: "{{ local_indexes }}" + register: create_table + + - name: Check results - Create table with composite PK and local indexes + assert: + that: + - create_table is successful + - create_table is changed + - '"hash_key_name" in create_table' + - '"hash_key_type" in create_table' + - '"indexes" in create_table' + - '"range_key_name" in create_table' + - '"range_key_type" in create_table' + - '"read_capacity" in create_table' + - '"region" in create_table' + - '"table_name" in create_table' + - '"table_status" in create_table' + - '"tags" in create_table' + - '"write_capacity" in create_table' + - create_table.hash_key_name == table_index + - create_table.hash_key_type == table_index_type + - create_table.range_key_name == range_index + - create_table.range_key_type == range_index_type + - create_table.indexes | length == 3 + - create_table.read_capacity == 1 + - create_table.table_name == table_name_composite_pk_local_indexes + - create_table.write_capacity == 1 + + - name: Create table with composite PK and local indexes - idempotent - check_mode + dynamodb_table: + state: present + name: "{{ table_name_composite_pk_local_indexes }}" + hash_key_name: "{{ table_index }}" + 
hash_key_type: "{{ table_index_type }}" + range_key_name: "{{ range_index }}" + range_key_type: "{{ range_index_type }}" + indexes: "{{ local_indexes }}" + register: create_table + check_mode: True + + - name: Check results - Create table with composite PK and local indexes - idempotent - check_mode + assert: + that: + - create_table is successful + - create_table is not changed + + - name: Create table with composite PK and local indexes - idempotent + dynamodb_table: + state: present + name: "{{ table_name_composite_pk_local_indexes }}" + hash_key_name: "{{ table_index }}" + hash_key_type: "{{ table_index_type }}" + range_key_name: "{{ range_index }}" + range_key_type: "{{ range_index_type }}" + indexes: "{{ local_indexes }}" + register: create_table + + - name: Check results - Create table with composite PK and local indexes - idempotent + assert: + that: + - create_table is successful + - create_table is not changed + - '"hash_key_name" in create_table' + - '"hash_key_type" in create_table' + - '"indexes" in create_table' + - '"range_key_name" in create_table' + - '"range_key_type" in create_table' + - '"read_capacity" in create_table' + - '"region" in create_table' + - '"table_name" in create_table' + - '"table_status" in create_table' + - '"tags" in create_table' + - '"write_capacity" in create_table' + - create_table.hash_key_name == table_index + - create_table.hash_key_type == table_index_type + - create_table.range_key_name == range_index + - create_table.range_key_type == range_index_type + - create_table.indexes | length == 3 + - create_table.read_capacity == 1 + - create_table.table_name == table_name_composite_pk_local_indexes + - create_table.write_capacity == 1 + + # ============================================== - name: Tag table - check_mode dynamodb_table: @@ -488,14 +744,14 @@ - update_indexes is successful - update_indexes is not changed - - name: Update table add indexes - idempotent + - name: Update table add global indexes - idempotent dynamodb_table: state: present name: "{{ table_name }}" indexes: "{{ indexes }}" register: update_indexes - - name: Check results - Update table add indexes - idempotent + - name: Check results - Update table add global indexes - idempotent assert: that: - update_indexes is successful @@ -588,8 +844,6 @@ tags: "{{ tags_default }}" indexes: "{{ indexes }}" register: create_complex_table - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" check_mode: True - name: Check results - Create complex table - check_mode @@ -612,8 +866,6 @@ tags: "{{ tags_default }}" indexes: "{{ indexes }}" register: create_complex_table - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" - name: Check results - Create complex table assert: @@ -656,8 +908,6 @@ tags: "{{ tags_default }}" indexes: "{{ indexes }}" register: create_complex_table - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" check_mode: True - name: Check results - Create complex table - idempotent - check_mode @@ -680,8 +930,6 @@ tags: "{{ tags_default }}" indexes: "{{ indexes }}" register: create_complex_table - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" - name: Check results - Create complex table - idempotent assert: @@ -719,8 +967,6 @@ name: "{{ table_name }}" table_class: "STANDARD" register: update_class - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" check_mode: True - name: Check results - Update table class - check_mode @@ -734,8 +980,6 @@ 
state: present name: "{{ table_name }}" table_class: "STANDARD" - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" register: update_class - name: Check results - Update table class @@ -873,6 +1117,20 @@ wait: false register: delete_table + - name: Delete provisioned table with composite key + dynamodb_table: + state: absent + name: "{{ table_name_composite_pk }}" + wait: false + register: delete_table + + - name: Delete provisioned table with composite key and local indexes + dynamodb_table: + state: absent + name: "{{ table_name_composite_pk_local_indexes }}" + wait: false + register: delete_table + - name: Delete on-demand table dynamodb_table: state: absent
diff --git a/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/tasks/test_pay_per_request.yml b/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/tasks/test_pay_per_request.yml index a05021154..b469a1b51 100644 --- a/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/tasks/test_pay_per_request.yml +++ b/ansible_collections/community/aws/tests/integration/targets/dynamodb_table/tasks/test_pay_per_request.yml @@ -22,6 +22,7 @@ hash_key_name: "{{ table_index }}" hash_key_type: "{{ table_index_type }}" billing_mode: PAY_PER_REQUEST + wait_timeout: 450 register: create_table - name: Check results - Create table
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/aliases b/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/aliases new file mode 100644 index 000000000..913237649 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/aliases @@ -0,0 +1,9 @@ +# reason: missing-policy +# To test Carrier Gateway in the VPC, the Wavelength subnet +# group should be enabled on the AWS Account.
+unsupported + +cloud/aws + +ec2_carrier_gateway +ec2_carrier_gateway_info diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/defaults/main.yml new file mode 100644 index 000000000..2e8c38f88 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/defaults/main.yml @@ -0,0 +1,3 @@ +--- +vpc_name: '{{ resource_prefix }}-ec2-vpc-cagw' +cagw_name: '{{ resource_prefix }}-ec2-vpc-cagw' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/meta/main.yml index 32cf5dda7..32cf5dda7 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/meta/main.yml diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/tasks/main.yml new file mode 100644 index 000000000..4d005b90a --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/tasks/main.yml @@ -0,0 +1,167 @@ +--- +- name: 'ec2_carrier_gateway integration tests' + collections: + - community.aws + module_defaults: + group/aws: + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + # ============================================================ + - debug: msg="Setting up test dependencies" + + - name: create a VPC + ec2_vpc_net: + name: "{{ vpc_name }}-{{ item }}" + state: present + cidr_block: "{{ vpc_cidr }}" + tags: + Description: "Created by ansible-test for CAGW tests" + register: vpc_result + loop: [1] + + - name: use set fact for vpc ids + set_fact: + vpc_id_1: '{{ vpc_result.results.0.vpc.id }}' + + # ============================================================ + - debug: msg="Running tests" + + - name: create carrier gateway and attach it to vpc + ec2_carrier_gateway: + state: present + vpc_id: '{{ vpc_id_1 }}' + name: "{{ cagw_name }}" + register: cagw + check_mode: true + + - name: use set fact for cagw ids + set_fact: + cagw_id: '{{ cagw.carrier_gateway_id }}' + + - assert: + that: + - cagw.changed + - cagw.vpc_id == vpc_id_1 + - cagw.tags.Name == cagw_name + + - name: test idempotence + ec2_carrier_gateway: + state: present + vpc_id: '{{ vpc_id_1 }}' + name: "{{ cagw_name }}" + register: cagw + check_mode: true + + - assert: + that: + - not cagw.changed + - cagw.carrier_gateway_id == cagw_id + + # ============================================================ + + - name: get VPC CAGW facts by ID (CHECK) + ec2_carrier_gateway_info: + carrier_gateway_id: ['{{ cagw_id }}'] + register: cagw_info + check_mode: True + + - name: verify expected facts + vars: + cagw_details: '{{ cagw_info.carrier_gateways[0] }}' + assert: + that: + - cagw_info.carrier_gateways | length == 1 + - '"carrier_gateway_id" in cagw_details' + - '"tags" in cagw_details' + - '"vpc_id" in cagw_details' + - cagw_details.carrier_gateway_id == cagw_id + - '"Name" in cagw_details.tags' + - cagw_details.tags.Name == cagw_name + + - name: get VPC CAGW facts by Tag + ec2_carrier_gateway_info: + filters: + "tag:Name": "{{ cagw_name }}" + register: cagw_info + 
+ + - name: verify expected facts + vars: + cagw_details: '{{ cagw_info.carrier_gateways[0] }}' + assert: + that: + - cagw_info.carrier_gateways | length == 1 + - '"carrier_gateway_id" in cagw_details' + - '"state" in cagw_details' + - '"tags" in cagw_details' + - cagw_details.carrier_gateway_id == cagw_id + - '"Name" in cagw_details.tags' + - cagw_details.tags.Name == cagw_name + + + # ============================================================ + + - name: get all CAGWs + ec2_carrier_gateway_info: + register: cagw_info + + - name: verify test CAGW is in the results + vars: + cagw_id_list: '{{ cagw_info.carrier_gateways | map(attribute="carrier_gateway_id") | list }}' + assert: + that: + - cagw_id in cagw_id_list + + # ============================================================ + + - include_tasks: 'tags.yml' + + # ============================================================ + + - name: delete carrier gateway + ec2_carrier_gateway: + state: absent + name: "{{ cagw_name }}" + register: cagw + check_mode: true + + - assert: + that: + - cagw.changed + + - name: test idempotence + ec2_carrier_gateway: + state: absent + name: "{{ cagw_name }}" + register: cagw + check_mode: true + + - assert: + that: + - not cagw.changed + + always: + + - debug: msg="Removing test dependencies" + + - name: delete carrier gateway + ec2_carrier_gateway: + state: absent + carrier_gateway_id: '{{ cagw.carrier_gateway_id }}' + ignore_errors: true + check_mode: true + + - name: delete vpc + ec2_vpc_net: + name: "{{ vpc_name }}-{{ item }}" + state: absent + cidr_block: "{{ vpc_cidr }}" + loop: [1, 2] + register: result + retries: 10 + delay: 5 + until: result is not failed + ignore_errors: true
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/tasks/tags.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/tasks/tags.yml new file mode 100644 index 000000000..07104daa7 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/ec2_carrier_gateway/tasks/tags.yml @@ -0,0 +1,224 @@ +- vars: + first_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + second_tags: + 'New Key with Spaces': Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + third_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + 'New Key with Spaces': Updated Value with spaces + final_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + 'New Key with Spaces': Updated Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + name_tags: + Name: '{{ cagw_name }}' + module_defaults: + ec2_carrier_gateway: + name: '{{ cagw_name }}' + ec2_carrier_gateway_info: + carrier_gateway_id: ['{{ cagw_id }}'] + block: + + # ============================================================ + + - name: add tags + ec2_carrier_gateway: + tags: '{{ first_tags }}' + state: 'present' + register: tag_cagw + check_mode: true + + - name: get VPC CAGW facts + ec2_carrier_gateway_info: {} + register: tag_cagw_info + + - name: verify the tags were added + assert: + that: + - tag_cagw is changed + - tag_cagw.carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].tags == ( first_tags | combine(name_tags) ) + + - name: add tags - IDEMPOTENCY + ec2_carrier_gateway: + tags: '{{ first_tags }}' + state: 'present' + register: tag_cagw + check_mode: true + - name: get VPC CAGW facts + ec2_carrier_gateway_info: {} + register: tag_cagw_info + + - name: verify no change + assert: + that: + - tag_cagw is not changed + - tag_cagw.carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].tags == ( first_tags | combine(name_tags) ) + + # ============================================================ + + - name: get VPC CAGW facts by filter + ec2_carrier_gateway_info: + filters: + 'tag:Name': '{{ cagw_name }}' + carrier_gateway_id: '{{ omit }}' + register: tag_cagw_info + + - name: assert the facts are the same as before + assert: + that: + - tag_cagw_info.carrier_gateways | length == 1 + - tag_cagw.carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].carrier_gateway_id == cagw_id + + # ============================================================ + + - name: modify tags with purge + ec2_carrier_gateway: + tags: '{{ second_tags }}' + state: 'present' + register: tag_cagw + check_mode: true + - name: get VPC CAGW facts + ec2_carrier_gateway_info: + register: tag_cagw_info + + - name: verify the tags were added + assert: + that: + - tag_cagw is changed + - tag_cagw.carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].tags == ( second_tags | combine(name_tags) ) + + - name: modify tags with purge - IDEMPOTENCY + ec2_carrier_gateway: + tags: '{{ second_tags }}' + state: 'present' + register: tag_cagw + check_mode: true + - name: get VPC CAGW facts + ec2_carrier_gateway_info: + register: tag_cagw_info + + - name: verify no change + assert: + that: + - tag_cagw is not changed + - tag_cagw.carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].tags == ( second_tags | combine(name_tags) ) + + # ============================================================ + + - name: modify tags without purge + ec2_carrier_gateway: + tags: '{{ third_tags }}' + state: 'present' + purge_tags: False + register: tag_cagw + check_mode: true + - name: get VPC CAGW facts + ec2_carrier_gateway_info: + register: tag_cagw_info + + - name: verify the tags were added + assert: + that: + - tag_cagw is changed + - tag_cagw.carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].tags == ( final_tags | combine(name_tags) ) + + - name: modify tags without purge - IDEMPOTENCY + ec2_carrier_gateway: + tags: '{{ third_tags }}' + state: 'present' + purge_tags: False + register: tag_cagw + check_mode: true + - name: get VPC CAGW facts + ec2_carrier_gateway_info: + register: tag_cagw_info + + - name: verify no change + assert: + that: + - tag_cagw is not changed + - tag_cagw.carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].carrier_gateway_id == cagw_id + - tag_cagw_info.carrier_gateways[0].tags == ( final_tags | combine(name_tags) ) + + # ============================================================ + + - name: No change to tags without setting tags + ec2_carrier_gateway: + state: 'present' + register: tag_cagw + check_mode: true + - name: get VPC CAGW facts
+      ec2_carrier_gateway_info:
+      register: tag_cagw_info
+
+    - name: verify the tags were added
+      assert:
+        that:
+          - tag_cagw is not changed
+          - tag_cagw.carrier_gateway_id == cagw_id
+          - tag_cagw_info.carrier_gateways[0].carrier_gateway_id == cagw_id
+          - tag_cagw_info.carrier_gateways[0].tags == ( final_tags | combine(name_tags) )
+
+    # ============================================================
+
+    - name: remove non name tags
+      ec2_carrier_gateway:
+        tags: {}
+        state: 'present'
+      register: tag_cagw
+      check_mode: true
+    - name: get VPC CAGW facts
+      ec2_carrier_gateway_info:
+      register: tag_cagw_info
+
+    - name: verify the tags were added
+      assert:
+        that:
+          - tag_cagw is changed
+          - tag_cagw.carrier_gateway_id == cagw_id
+          - tag_cagw_info.carrier_gateways[0].carrier_gateway_id == cagw_id
+          - tag_cagw_info.carrier_gateways[0].tags == name_tags
+
+    - name: remove non name tags - IDEMPOTENCY
+      ec2_carrier_gateway:
+        tags: {}
+        state: 'present'
+      register: tag_cagw
+      check_mode: true
+    - name: get VPC CAGW facts
+      ec2_carrier_gateway_info:
+      register: tag_cagw_info
+
+    - name: verify no change
+      assert:
+        that:
+          - tag_cagw is not changed
+          - tag_cagw.carrier_gateway_id == cagw_id
+          - tag_cagw_info.carrier_gateways[0].carrier_gateway_id == cagw_id
+          - tag_cagw_info.carrier_gateways[0].tags == name_tags
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/meta/main.yml
index ca18dd30f..1471b11f6 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/meta/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/meta/main.yml
@@ -1,5 +1,2 @@
 dependencies:
   - setup_ec2_facts
-  - role: setup_botocore_pip
-    vars:
-      botocore_version: "1.23.30"
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/instance-metadata.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/instance-metadata.yml
index afe907f4f..7648f00ef 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/instance-metadata.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/instance-metadata.yml
@@ -1,53 +1,5 @@
 ---
-- name: test with older boto3 version that does not support instance_metadata_tags
-  block:
-    - name: fail metadata_options
-      ec2_launch_template:
-        name: "{{ resource_prefix }}-test-metadata"
-        metadata_options:
-          http_put_response_hop_limit: 1
-          http_tokens: required
-          http_protocol_ipv6: enabled
-          instance_metadata_tags: enabled
-        state: present
-      register: metadata_options_launch_template
-      ignore_errors: yes
-    - name: verify fail with usefull error message
-      assert:
-        that:
-          - metadata_options_launch_template.failed
-          - metadata_options_launch_template is not changed
-          - "'This is required to set instance_metadata_tags' in metadata_options_launch_template.msg"
-
-    - name: success metadata_options
-      ec2_launch_template:
-        name: "{{ resource_prefix }}-test-metadata"
-        metadata_options:
-          http_put_response_hop_limit: 1
-          http_tokens: required
-        state: present
-      register: metadata_options_launch_template
-    - name: instance with metadata_options created with the right options
-      assert:
-        that:
-          - metadata_options_launch_template is changed
"metadata_options_launch_template.latest_template.launch_template_data.metadata_options.http_put_response_hop_limit == 1" - - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.http_tokens == 'required'" - - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.http_protocol_ipv6 is not defined" - - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.instance_metadata_tags is not defined" - always: - - name: delete the template - ec2_launch_template: - name: "{{ resource_prefix }}-test-metadata" - state: absent - register: del_lt - retries: 10 - until: del_lt is not failed - ignore_errors: true - -- name: test with boto3 version that supports instance_metadata_tags - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" +- name: instance_metadata_tags block: - name: metadata_options ec2_launch_template: diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/main.yml index aa87871ce..e89dfceb5 100644 --- a/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/main.yml @@ -1,9 +1,9 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - include_tasks: cpu_options.yml diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/tags_and_vpc_settings.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/tags_and_vpc_settings.yml index 026c59907..41ff9082b 100644 --- a/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/tags_and_vpc_settings.yml +++ b/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/tasks/tags_and_vpc_settings.yml @@ -31,7 +31,7 @@ register: testing_subnet_b - name: create a security group with the vpc - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" @@ -164,7 +164,7 @@ always: - name: remove the security group - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/env_cleanup.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/env_cleanup.yml index 9e5ae6a93..ce626b69c 100644 --- a/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/env_cleanup.yml +++ b/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/env_cleanup.yml @@ -24,7 +24,7 @@ retries: 10 - name: remove the security group - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/env_setup.yml 
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/env_setup.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/env_setup.yml
index 88f5bb6fe..d48bae66c 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/env_setup.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/env_setup.yml
@@ -48,7 +48,7 @@
     - "{{ testing_subnet_b.subnet.id }}"
 
 - name: create a security group with the vpc
-  ec2_group:
+  ec2_security_group:
     name: "{{ resource_prefix }}-sg"
     description: a security group for ansible tests
     vpc_id: "{{ testing_vpc.vpc.id }}"
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/main.yml
index 91fd9497c..10695571e 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ec2_placement_group/tasks/main.yml
@@ -1,9 +1,9 @@
 - name: run ec2_placement_group tests
   module_defaults:
     group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
   collections:
     - amazon.aws
@@ -25,7 +25,7 @@
   - assert:
       that:
         - pg_1_create_check_mode is changed
-        - pg_1_create_check_mode.placement_group.name == '{{ resource_prefix }}-pg1'
+        - pg_1_create_check_mode.placement_group.name == resource_prefix ~ '-pg1'
        - pg_1_create_check_mode.placement_group.state == "DryRun"
        - '"ec2:CreatePlacementGroup" in pg_1_create_check_mode.resource_actions'
@@ -41,7 +41,7 @@
   - assert:
       that:
        - pg_1_create is changed
-        - pg_1_create.placement_group.name == '{{ resource_prefix }}-pg1'
+        - pg_1_create.placement_group.name == resource_prefix ~ '-pg1'
        - pg_1_create.placement_group.state == "available"
        - '"ec2:CreatePlacementGroup" in pg_1_create.resource_actions'
@@ -54,7 +54,7 @@
   - assert:
       that:
        - pg_1_info_result is not changed
-        - pg_1_info_result.placement_groups[0].name == '{{ resource_prefix }}-pg1'
+        - pg_1_info_result.placement_groups[0].name == resource_prefix ~ '-pg1'
        - pg_1_info_result.placement_groups[0].state == "available"
        - pg_1_info_result.placement_groups[0].strategy == "cluster"
        - '"ec2:DescribePlacementGroups" in pg_1_info_result.resource_actions'
@@ -68,7 +68,7 @@
   - assert:
       that:
        - pg_1_create is not changed
-        - pg_1_create.placement_group.name == '{{ resource_prefix }}-pg1'
+        - pg_1_create.placement_group.name == resource_prefix ~ '-pg1'
        - pg_1_create.placement_group.state == "available"
        - '"ec2:CreatePlacementGroup" not in pg_1_create.resource_actions'
@@ -82,7 +82,7 @@
   - assert:
       that:
        - pg_1_create_check_mode_idem is not changed
-        - pg_1_create_check_mode_idem.placement_group.name == '{{ resource_prefix }}-pg1'
+        - pg_1_create_check_mode_idem.placement_group.name == resource_prefix ~ '-pg1'
        - pg_1_create_check_mode_idem.placement_group.state == "available"
        - '"ec2:CreatePlacementGroup" not in pg_1_create_check_mode_idem.resource_actions'
@@ -97,7 +97,7 @@
   - assert:
       that:
        - pg_2_create_check_mode is changed
-        - pg_2_create_check_mode.placement_group.name == '{{ resource_prefix }}-pg2'
+        - pg_2_create_check_mode.placement_group.name == resource_prefix ~ '-pg2'
        - pg_2_create_check_mode.placement_group.state == "DryRun"
        - '"ec2:CreatePlacementGroup" in pg_2_create_check_mode.resource_actions'
@@ -111,7 +111,7 @@
   - assert:
       that:
        - pg_2_create is changed
-        - pg_2_create.placement_group.name == '{{ resource_prefix }}-pg2'
+        - pg_2_create.placement_group.name == resource_prefix ~ '-pg2'
        - pg_2_create.placement_group.state == "available"
        - '"ec2:CreatePlacementGroup" in pg_2_create.resource_actions'
@@ -127,7 +127,7 @@
   - assert:
       that:
        - pg_2_info_result is not changed
-        - pg_2_info_result.placement_groups[0].name == '{{ resource_prefix }}-pg2'
+        - pg_2_info_result.placement_groups[0].name == resource_prefix ~ '-pg2'
        - pg_2_info_result.placement_groups[0].state == "available"
        - pg_2_info_result.placement_groups[0].strategy == "spread"
        - '"ec2:DescribePlacementGroups" in pg_2_info_result.resource_actions'
@@ -142,7 +142,7 @@
   - assert:
       that:
        - pg_2_create is not changed
-        - pg_2_create.placement_group.name == '{{ resource_prefix }}-pg2'
+        - pg_2_create.placement_group.name == resource_prefix ~ '-pg2'
        - pg_2_create.placement_group.state == "available"
        - '"ec2:CreatePlacementGroup" not in pg_2_create.resource_actions'
@@ -157,7 +157,7 @@
   - assert:
       that:
        - pg_2_create_check_mode_idem is not changed
-        - pg_2_create_check_mode_idem.placement_group.name == '{{ resource_prefix }}-pg2'
+        - pg_2_create_check_mode_idem.placement_group.name == resource_prefix ~ '-pg2'
        - pg_2_create_check_mode_idem.placement_group.state == "available"
        - '"ec2:CreatePlacementGroup" not in pg_2_create_check_mode_idem.resource_actions'
@@ -173,7 +173,7 @@
   - assert:
       that:
        - pg_3_create_check_mode is changed
-        - pg_3_create_check_mode.placement_group.name == '{{ resource_prefix }}-pg3'
+        - pg_3_create_check_mode.placement_group.name == resource_prefix ~ '-pg3'
        - pg_3_create_check_mode.placement_group.state == "DryRun"
        - '"ec2:CreatePlacementGroup" in pg_3_create_check_mode.resource_actions'
@@ -188,7 +188,7 @@
   - assert:
       that:
        - pg_3_create is changed
-        - pg_3_create.placement_group.name == '{{ resource_prefix }}-pg3'
+        - pg_3_create.placement_group.name == resource_prefix ~ '-pg3'
        - pg_3_create.placement_group.state == "available"
        - '"ec2:CreatePlacementGroup" in pg_3_create.resource_actions'
@@ -205,7 +205,7 @@
   - assert:
       that:
        - pg_3_info_result is not changed
-        - pg_3_info_result.placement_groups[0].name == '{{ resource_prefix }}-pg3'
+        - pg_3_info_result.placement_groups[0].name == resource_prefix ~ '-pg3'
        - pg_3_info_result.placement_groups[0].state == "available"
        - pg_3_info_result.placement_groups[0].strategy == "partition"
        - '"ec2:DescribePlacementGroups" in pg_3_info_result.resource_actions'
@@ -221,7 +221,7 @@
   - assert:
       that:
        - pg_3_create is not changed
-        - pg_3_create.placement_group.name == '{{ resource_prefix }}-pg3'
+        - pg_3_create.placement_group.name == resource_prefix ~ '-pg3'
        - pg_3_create.placement_group.state == "available"
        - '"ec2:CreatePlacementGroup" not in pg_3_create.resource_actions'
@@ -237,7 +237,7 @@
   - assert:
       that:
        - pg_3_create_check_mode_idem is not changed
-        - pg_3_create_check_mode_idem.placement_group.name == '{{ resource_prefix }}-pg3'
+        - pg_3_create_check_mode_idem.placement_group.name == resource_prefix ~ '-pg3'
        - pg_3_create_check_mode_idem.placement_group.state == "available"
        - '"ec2:CreatePlacementGroup" not in pg_3_create_check_mode_idem.resource_actions'
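Every assertion rewrite in the ec2_placement_group hunks follows the same pattern: Jinja2 delimiters are dropped from inside assert conditions, which are already implicitly templated, in favour of the ~ string-concatenation operator. A minimal before/after sketch using the same variables as these tests:

    # before: template delimiters inside an already-templated expression
    - assert:
        that:
          - pg_1_create.placement_group.name == '{{ resource_prefix }}-pg1'

    # after: bare Jinja2 expression with concatenation
    - assert:
        that:
          - pg_1_create.placement_group.name == resource_prefix ~ '-pg1'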
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/tasks/main.yml
index 6cb279f77..c7353cfc0 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/tasks/main.yml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway_vpc_attachment/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway_vpc_attachment/tasks/main.yml
index 8694b829e..ce9659473 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway_vpc_attachment/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway_vpc_attachment/tasks/main.yml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/tasks/main.yml
index 41540b8d4..75fff0e4e 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/tasks/main.yml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/main.yml
index e1538049a..36c7ab2d8 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/main.yml
@@ -1,9 +1,9 @@
 ---
 - module_defaults:
     group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
   collections:
     - amazon.aws
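The module_defaults hunks above and below all make the same substitution: the aws_access_key, aws_secret_key, and security_token keys become access_key, secret_key, and session_token. A minimal sketch of the new-style defaults block, assuming the aws_access_key/aws_secret_key/security_token variables these targets already define (the task list is a placeholder):

    - module_defaults:
        group/aws:
          access_key: "{{ aws_access_key }}"
          secret_key: "{{ aws_secret_key }}"
          session_token: "{{ security_token | default(omit) }}"
          region: "{{ aws_region }}"
      block:
        - include_tasks: full_test.yml   # placeholder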
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_peer/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_peer/tasks/main.yml
index cdb7c6680..b39b69b74 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_peer/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_peer/tasks/main.yml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
   block:
     - name: get ARN of calling user
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/tasks/main.yml
index 37bbf5e37..f5a850a71 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/tasks/main.yml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:
diff --git a/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn/tasks/main.yml
index a4c740887..9514d7cf3 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn/tasks/main.yml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:
@@ -35,7 +35,63 @@
         name: testcgw
       register: cgw
 
-    - name: create vpn connection, with customer gateway
+    - name: create transit gateway
+      ec2_transit_gateway:
+        description: "Transit Gateway for vpn attachment"
+      register: tgw
+
+    - name: create vpn connection, with customer gateway, vpn_gateway_id and transit_gateway
+      ec2_vpc_vpn:
+        customer_gateway_id: '{{ cgw.gateway.customer_gateway.customer_gateway_id }}'
+        vpn_gateway_id: '{{ vgw.vgw.id }}'
+        transit_gateway_id: '{{ tgw.transit_gateway.transit_gateway_id }}'
+        state: present
+      register: result
+      ignore_errors: true
+
+    - name: assert creation of vpn failed
+      assert:
+        that:
+          - result is failed
+          - result.msg == "parameters are mutually exclusive: vpn_gateway_id|transit_gateway_id"
+
+
+    - name: create vpn connection, with customer gateway and transit_gateway
+      ec2_vpc_vpn:
+        customer_gateway_id: '{{ cgw.gateway.customer_gateway.customer_gateway_id }}'
+        transit_gateway_id: '{{ tgw.transit_gateway.transit_gateway_id }}'
+        state: present
+      register: tgw_vpn
+
+    - name: Store ID of VPN
+      set_fact:
+        vpn_id: '{{ tgw_vpn.vpn_connection_id }}'
+
+    # ============================================================
+    - name: test success with no parameters
+      ec2_vpc_vpn_info:
+      register: result
+
+    - name: assert success with no parameters
+      assert:
+        that:
+          - 'result.changed == false'
+          - 'result.vpn_connections != []'
+    # ============================================================
+
+    - name: Delete vpn created with transit gateway
+      ec2_vpc_vpn:
+        state: absent
+        vpn_connection_id: '{{ vpn_id }}'
+      register: result
+      retries: 10
+      delay: 3
+      until: result is not failed
+      ignore_errors: true
+
+    # ============================================================
+
+    - name: create vpn connection, with customer gateway and vpn gateway
       ec2_vpc_vpn:
         customer_gateway_id: '{{ cgw.gateway.customer_gateway.customer_gateway_id }}'
         vpn_gateway_id: '{{ vgw.vgw.id }}'
@@ -47,6 +103,7 @@
         vpn_id: '{{ vpn.vpn_connection_id }}'
 
     # ============================================================
+
     - name: test success with no parameters
       ec2_vpc_vpn_info:
       register: result
@@ -163,3 +220,9 @@
       delay: 3
       until: result is not failed
       ignore_errors: true
+
+    - name: delete transit gateway
+      ec2_transit_gateway:
+        transit_gateway_id: '{{ tgw.transit_gateway.transit_gateway_id }}'
+        state: absent
+      ignore_errors: true
diff --git a/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/meta/main.yml
index 7f42526eb..32cf5dda7 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/meta/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/meta/main.yml
@@ -1,4 +1 @@
-dependencies:
-  - role: setup_botocore_pip
-    vars:
-      botocore_version: "1.24.14"
+dependencies: []
diff --git a/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/01_create_requirements.yml b/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/01_create_requirements.yml
index 31ca3cf27..14c1b6337 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/01_create_requirements.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/01_create_requirements.yml
@@ -72,7 +72,7 @@
     register: igw
 
   - name: create a security group to use for creating an ec2 instance
-    ec2_group:
+    ec2_security_group:
       name: '{{ resource_prefix }}_ecs_cluster-sg'
       description: 'created by Ansible integration tests'
       state: present
@@ -86,9 +86,9 @@
       # As a lookup plugin we don't have access to module_defaults
       connection_args:
         region: "{{ aws_region }}"
-        aws_access_key: "{{ aws_access_key }}"
-        aws_secret_key: "{{ aws_secret_key }}"
-        aws_security_token: "{{ security_token | default(omit) }}"
+        access_key: "{{ aws_access_key }}"
+        secret_key: "{{ aws_secret_key }}"
+        session_token: "{{ security_token | default(omit) }}"
     no_log: True
 
   - name: set image id fact
diff --git a/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml b/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml
index 4e0620555..3c4bbcb28 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml
@@ -86,8 +86,6 @@
         - not ecs_service_again.changed
 
     - name: create same ECS service definition via force_new_deployment
-      vars:
-        ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
       ecs_service:
         state: present
         force_new_deployment: true
@@ -113,8 +111,6 @@
         - ecs_service_again.changed
 
     - name: force_new_deployment should work without providing a task_definition
-      vars:
-        ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
       ecs_service:
         state: present
         force_new_deployment: yes
@@ -139,8 +135,6 @@
         - ecs_service_notaskdef.changed
 
     - name: attempt to use ECS network configuration on task definition without awsvpc network_mode (expected to fail)
-      vars:
-        ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
       ecs_service:
         state: present
         name: "{{ ecs_service_name }}3"
}}3" @@ -166,8 +160,6 @@ - ecs_service_network_without_awsvpc_task is failed - name: scale down ECS service - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: state: present name: "{{ ecs_service_name }}" @@ -191,8 +183,6 @@ - ecs_service_scale_down.service.desiredCount == 0 - name: scale down ECS service again - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: state: present name: "{{ ecs_service_name }}" @@ -228,8 +218,6 @@ - ecs_task_update.changed - name: Enable ExecuteCommand - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: state: present name: "{{ ecs_service_name }}" @@ -315,8 +303,6 @@ - "ecs_taskdefinition_info.network_mode == 'awsvpc'" - name: create ECS service definition with network configuration - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: state: present name: "{{ ecs_service_name }}2" @@ -344,8 +330,6 @@ - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1" - name: create ecs_service using health_check_grace_period_seconds - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: name: "{{ ecs_service_name }}-mft" cluster: "{{ ecs_cluster_name }}" @@ -364,11 +348,9 @@ assert: that: - ecs_service_creation_hcgp.changed - - "{{ecs_service_creation_hcgp.service.healthCheckGracePeriodSeconds}} == 30" + - ecs_service_creation_hcgp.service.healthCheckGracePeriodSeconds == 30 - name: update ecs_service using health_check_grace_period_seconds - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: name: "{{ ecs_service_name }}-mft" cluster: "{{ ecs_cluster_name }}" @@ -386,11 +368,9 @@ assert: that: - ecs_service_creation_hcgp2.changed - - "{{ecs_service_creation_hcgp2.service.healthCheckGracePeriodSeconds}} == 10" + - ecs_service_creation_hcgp2.service.healthCheckGracePeriodSeconds == 10 - name: update ecs_service using REPLICA scheduling_strategy - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: name: "{{ ecs_service_name }}-replica" cluster: "{{ ecs_cluster_name }}" @@ -473,8 +453,8 @@ assert: that: - ecs_task_definition_constraints is changed - - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].type == "{{ ecs_taskdefinition_placement_constraints[0].type }}" - - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].expression == "{{ ecs_taskdefinition_placement_constraints[0].expression }}" + - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].type == ecs_taskdefinition_placement_constraints[0].type + - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].expression == ecs_taskdefinition_placement_constraints[0].expression - name: Remove ecs task definition with placement constraints ecs_taskdefinition: @@ -517,8 +497,6 @@ - "ecs_service_create_no_load_balancer.service.loadBalancers | length == 0" - name: Update ecs_service load balancer - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: name: "{{ ecs_service_name }}-lb" cluster: "{{ ecs_cluster_name }}" @@ -541,8 +519,6 @@ - "ecs_service_update_load_balancer.service.loadBalancers[0].targetGroupArn == elb_target_group_instance.target_group_arn" - name: Create ecs service with placement constraints - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: 
name: "{{ ecs_service_name }}-constraint" cluster: "{{ ecs_cluster_name }}" @@ -593,8 +569,6 @@ until: "ECS.services[0].deployments[0].rolloutState == 'COMPLETED'" - name: Update ecs service's placement constraints - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: name: "{{ ecs_service_name }}-constraint" cluster: "{{ ecs_cluster_name }}" @@ -621,8 +595,6 @@ - "ecs_service_update_constraints.service.placementConstraints[0].expression == 'attribute:ecs.instance-type == t3.micro'" - name: Remove ecs service's placement constraints - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: name: "{{ ecs_service_name }}-constraint" cluster: "{{ ecs_cluster_name }}" @@ -645,8 +617,6 @@ - "ecs_service_remove_constraints.service.placementConstraints | length == 0" - name: Create ecs service with placement strategy - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: name: "{{ ecs_service_name }}-strategy" cluster: "{{ ecs_cluster_name }}" @@ -672,8 +642,6 @@ - "ecs_service_creation_strategy.service.placementStrategy[0].field == 'MEMORY'" - name: Update ecs service's placement strategy - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: name: "{{ ecs_service_name }}-strategy" cluster: "{{ ecs_cluster_name }}" @@ -700,8 +668,6 @@ - "ecs_service_update_strategy.service.placementStrategy[0].field == 'instanceId'" - name: Remove ecs service's placement strategy - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" ecs_service: name: "{{ ecs_service_name }}-strategy" cluster: "{{ ecs_cluster_name }}" @@ -942,6 +908,65 @@ started_by: ansible_user register: fargate_run_task_output_with_assign_ip +- name: create task definition for ARM + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-arm" + network_mode: awsvpc + launch_type: FARGATE + cpu: 512 + memory: 1024 + execution_role_arn: "{{ iam_execution_role.arn }}" + state: present + runtime_platform: + cpuArchitecture: "ARM64" + operatingSystemFamily: "LINUX" + vars: + ecs_task_host_port: 8080 + register: fargate_arm_task_definition + +- name: check that initial task definition for ARM changes + assert: + that: + - fargate_arm_task_definition.changed + +- name: recreate task definition for ARM + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-arm" + network_mode: awsvpc + launch_type: FARGATE + cpu: 512 + memory: 1024 + execution_role_arn: "{{ iam_execution_role.arn }}" + state: present + runtime_platform: + cpuArchitecture: "ARM64" + operatingSystemFamily: "LINUX" + vars: + ecs_task_host_port: 8080 + register: fargate_arm_task_definition_again + +- name: check that task definition for ARM does not change + assert: + that: + - not fargate_arm_task_definition_again.changed + +- name: delete task definition for ARM + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-arm" + network_mode: awsvpc + launch_type: FARGATE + cpu: 512 + memory: 1024 + execution_role_arn: "{{ iam_execution_role.arn }}" + state: present + runtime_platform: + cpuArchitecture: "ARM64" + operatingSystemFamily: "LINUX" + vars: + ecs_task_host_port: 8080 # ============================================================ # End tests for Fargate diff --git a/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/99_terminate_everything.yml 
diff --git a/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/99_terminate_everything.yml b/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/99_terminate_everything.yml
index 7016f9e70..5d7ba5c72 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/99_terminate_everything.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/99_terminate_everything.yml
@@ -18,8 +18,6 @@
   ignore_errors: true
 
 - name: scale down ECS service
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
   ecs_service:
     state: present
     name: "{{ ecs_service_name }}"
@@ -44,8 +42,6 @@
   register: ecs_service_info
 
 - name: scale down second ECS service
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
   ecs_service:
     state: present
     name: "{{ ecs_service_name }}2"
@@ -62,8 +58,6 @@
   register: ecs_service_scale_down
 
 - name: scale down multifunction-test service
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
   ecs_service:
     name: "{{ ecs_service_name }}-mft"
     cluster: "{{ ecs_cluster_name }}"
@@ -78,8 +72,6 @@
   register: ecs_service_scale_down
 
 - name: scale down scheduling_strategy service
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
   ecs_service:
     name: "{{ ecs_service_name }}-replica"
     cluster: "{{ ecs_cluster_name }}"
@@ -94,8 +86,6 @@
   register: ecs_service_scale_down
 
 - name: scale down Fargate ECS service
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
   ecs_service:
     state: present
     name: "{{ ecs_service_name }}4"
@@ -271,7 +261,7 @@
   register: this_deletion
 
 - name: remove security groups
-  ec2_group:
+  ec2_security_group:
     name: '{{ item }}'
     description: 'created by Ansible integration tests'
     state: absent
diff --git a/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/main.yml
index 1d27cdc73..12d3cb52b 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/main.yml
@@ -4,15 +4,15 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:
-    - include: 01_create_requirements.yml
-    - include: 10_ecs_cluster.yml
-    - include: 20_ecs_service.yml
+    - include_tasks: 01_create_requirements.yml
+    - include_tasks: 10_ecs_cluster.yml
+    - include_tasks: 20_ecs_service.yml
 
   always:
-    - include: 99_terminate_everything.yml
+    - include_tasks: 99_terminate_everything.yml
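Swapping the bare include action for include_tasks, as in the ecs_cluster main.yml hunk above, matters on modern ansible-core, where the ambiguous include action has been removed outright. The shape of the converted playbook, abbreviated (credentials elided):

    - module_defaults:
        group/aws: {}   # defaults elided for brevity
      block:
        - include_tasks: 01_create_requirements.yml
        - include_tasks: 10_ecs_cluster.yml
        - include_tasks: 20_ecs_service.yml
      always:
        - include_tasks: 99_terminate_everything.yml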
default(omit) }}" block: - set_fact: @@ -15,7 +15,7 @@ register: aws_caller_info - name: create KMS key for testing - aws_kms: + kms_key: alias: "{{ resource_prefix }}-ecr" description: a key used for testing ECR state: present @@ -597,7 +597,7 @@ - name: it should use the provided KMS key assert: that: - - result.repository.encryptionConfiguration.kmsKey == '{{ kms_test_key.key_arn }}' + - result.repository.encryptionConfiguration.kmsKey == kms_test_key.key_arn always: @@ -607,6 +607,6 @@ state: absent - name: Delete KMS key - aws_kms: + kms_key: key_id: '{{ kms_test_key.key_arn }}' state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/ecs_tag/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ecs_tag/tasks/main.yml index fff9ee27d..2c5614eb8 100644 --- a/ansible_collections/community/aws/tests/integration/targets/ecs_tag/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/ecs_tag/tasks/main.yml @@ -1,9 +1,9 @@ - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key | default(omit) }}' - aws_secret_key: '{{ aws_secret_key | default(omit) }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' collections: - amazon.aws block: @@ -73,7 +73,7 @@ assert: that: - taglist.changed == true - - taglist.added_tags.Name == "{{ resource_prefix }}" + - taglist.added_tags.Name == resource_prefix - taglist.added_tags.another == "foobar" - name: cluster tags - Add tags to cluster again @@ -162,8 +162,8 @@ assert: that: - taglist.changed == true - - taglist.added_tags.Name == "service-{{ resource_prefix }}" - - taglist.tags.Name == "service-{{ resource_prefix }}" + - "taglist.added_tags.Name == 'service-' ~ resource_prefix" + - "taglist.tags.Name == 'service-' ~ resource_prefix" - name: services tags - Add name tag again - see no change ecs_tag: @@ -179,7 +179,7 @@ assert: that: - taglist.changed == false - - taglist.tags.Name == "service-{{ resource_prefix }}" + - "taglist.tags.Name == 'service-' ~ resource_prefix" - name: service tags - remove service tags ecs_tag: @@ -215,8 +215,8 @@ assert: that: - taglist.changed == true - - taglist.added_tags.Name == "task_definition-{{ resource_prefix }}" - - taglist.tags.Name == "task_definition-{{ resource_prefix }}" + - "taglist.added_tags.Name == 'task_definition-' ~ resource_prefix" + - "taglist.tags.Name == 'task_definition-' ~ resource_prefix" - name: task_definition tags - Add name tag again - see no change ecs_tag: @@ -232,7 +232,7 @@ assert: that: - taglist.changed == false - - taglist.tags.Name == "task_definition-{{ resource_prefix }}" + - "taglist.tags.Name == 'task_definition-' ~ resource_prefix" - name: task_definition tags - remove task_definition tags ecs_tag: diff --git a/ansible_collections/community/aws/tests/integration/targets/efs/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/efs/tasks/main.yml index d2e9d4bee..bc23f3a11 100644 --- a/ansible_collections/community/aws/tests/integration/targets/efs/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/efs/tasks/main.yml @@ -4,9 +4,9 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ 
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:
@@ -41,7 +41,7 @@
       register: testing_subnet_b
 
     - name: Get default security group id for vpc
-      ec2_group_info:
+      ec2_security_group_info:
         filters:
           vpc-id: "{{ testing_vpc.vpc.id }}"
       register: sg_facts
@@ -98,7 +98,7 @@
           - efs_result.efs[0].mount_targets[1].security_groups[0] == vpc_default_sg_id
 
    - assert:
-        that: "{{efs_result_assertions}}"
+        that: efs_result_assertions
 
    # ============================================================
@@ -107,7 +107,7 @@
       register: efs_result
 
    - assert:
-        that: "{{efs_result_assertions}}"
+        that: efs_result_assertions
 
    # ============================================================
    - name: Get EFS by tag
@@ -117,7 +117,7 @@
       register: efs_result
 
    - assert:
-        that: "{{efs_result_assertions}}"
+        that: efs_result_assertions
 
    # ============================================================
    - name: Get EFS by target (subnet_id)
@@ -127,7 +127,7 @@
       register: efs_result
 
    - assert:
-        that: "{{efs_result_assertions}}"
+        that: efs_result_assertions
 
    # ============================================================
    - name: Get EFS by target (security_group_id)
@@ -137,7 +137,7 @@
       register: efs_result
 
    - assert:
-        that: "{{efs_result_assertions}}"
+        that: efs_result_assertions
 
    # ============================================================
    - name: Get EFS by tag and target
@@ -149,7 +149,7 @@
       register: efs_result
 
    - assert:
-        that: "{{efs_result_assertions}}"
+        that: efs_result_assertions
 
    # ============================================================
    # Not checking efs_result.efs["throughput_mode"] here as
@@ -231,7 +231,7 @@
          - efs_result.efs[0].file_system_id == created_efs.efs.file_system_id
 
    - assert:
-        that: "{{efs_result_assertions}}"
+        that: efs_result_assertions
 
    # ============================================================
    - name: Efs configure IA transition
@@ -332,9 +332,9 @@
       efs_tag:
         state: present
         resource: "{{ created_efs.efs.file_system_id }}"
-        aws_access_key: '{{ aws_access_key }}'
-        aws_secret_key: '{{ aws_secret_key }}'
-        security_token: '{{ security_token | default(omit) }}'
+        access_key: '{{ aws_access_key }}'
+        secret_key: '{{ aws_secret_key }}'
+        session_token: '{{ security_token | default(omit) }}'
         region: "{{ aws_region }}"
         tags:
           check_mode_tag: 'this tag should not be applied'
@@ -349,9 +349,9 @@
       efs_tag:
         state: present
         resource: "{{ created_efs.efs.file_system_id }}"
-        aws_access_key: '{{ aws_access_key }}'
-        aws_secret_key: '{{ aws_secret_key }}'
-        security_token: '{{ security_token | default(omit) }}'
+        access_key: '{{ aws_access_key }}'
+        secret_key: '{{ aws_secret_key }}'
+        session_token: '{{ security_token | default(omit) }}'
         region: "{{ aws_region }}"
         tags:
           "Title Case": 'Hello Cruel World'
@@ -366,7 +366,7 @@
          - efs_tag_result.tags.Env is defined
          - efs_tag_result.tags.Env is search("IntegrationTests")
          - efs_tag_result.tags.Name is defined
-          - efs_tag_result.tags.Name is search("{{ efs_name }}-test-tag")
+          - efs_tag_result.tags.Name is search(efs_name ~ '-test-tag')
          - efs_tag_result.tags["CamelCase"] == 'SimpleCamelCase'
          - efs_tag_result.tags["Title Case"] == 'Hello Cruel World'
          - efs_tag_result.tags["lowercase spaced"] == 'hello cruel world'
@@ -377,9 +377,9 @@
       efs_tag:
         state: present
         resource: "{{ created_efs.efs.file_system_id }}"
-        aws_access_key: '{{ aws_access_key }}'
-        aws_secret_key: '{{ aws_secret_key }}'
-        security_token: '{{ security_token | default(omit) }}'
+        access_key: '{{ aws_access_key }}'
+        secret_key: '{{ aws_secret_key }}'
+        session_token: '{{ security_token | default(omit) }}'
         region: "{{ aws_region }}"
         tags:
           Env: IntegrationTests
@@ -394,9 +394,9 @@
       efs_tag:
         state: absent
         resource: "{{ created_efs.efs.file_system_id }}"
-        aws_access_key: '{{ aws_access_key }}'
-        aws_secret_key: '{{ aws_secret_key }}'
-        security_token: '{{ security_token | default(omit) }}'
+        access_key: '{{ aws_access_key }}'
+        secret_key: '{{ aws_secret_key }}'
+        session_token: '{{ security_token | default(omit) }}'
         region: "{{ aws_region }}"
         tags:
           snake_case: 'simple_snake_case'
@@ -412,9 +412,9 @@
       efs_tag:
         state: present
         resource: "{{ created_efs.efs.file_system_id }}"
-        aws_access_key: '{{ aws_access_key }}'
-        aws_secret_key: '{{ aws_secret_key }}'
-        security_token: '{{ security_token | default(omit) }}'
+        access_key: '{{ aws_access_key }}'
+        secret_key: '{{ aws_secret_key }}'
+        session_token: '{{ security_token | default(omit) }}'
         region: "{{ aws_region }}"
         tags:
           Env: OtherIntegrationTests
@@ -430,9 +430,9 @@
       efs_tag:
         state: present
         resource: "{{ created_efs.efs.file_system_id }}"
-        aws_access_key: '{{ aws_access_key }}'
-        aws_secret_key: '{{ aws_secret_key }}'
-        security_token: '{{ security_token | default(omit) }}'
+        access_key: '{{ aws_access_key }}'
+        secret_key: '{{ aws_secret_key }}'
+        session_token: '{{ security_token | default(omit) }}'
         region: "{{ aws_region }}"
         tags:
           Env: OtherIntegrationTests
@@ -448,9 +448,9 @@
       efs_tag:
         state: absent
         resource: "{{ created_efs.efs.file_system_id }}"
-        aws_access_key: '{{ aws_access_key }}'
-        aws_secret_key: '{{ aws_secret_key }}'
-        security_token: '{{ security_token | default(omit) }}'
+        access_key: '{{ aws_access_key }}'
+        secret_key: '{{ aws_secret_key }}'
+        session_token: '{{ security_token | default(omit) }}'
         region: "{{ aws_region }}"
         tags:
           "Title Case": 'Hello Cruel World'
@@ -464,7 +464,7 @@
          - efs_tag_result.tags.Env is defined
          - efs_tag_result.tags.Env is search("IntegrationTests")
          - efs_tag_result.tags.Name is defined
-          - efs_tag_result.tags.Name is search("{{ efs_name }}-test-tag")
+          - efs_tag_result.tags.Name is search(efs_name ~ '-test-tag')
          - not efs_tag_result.tags["CamelCase"] is defined
          - not efs_tag_result.tags["Title Case"] is defined
          - not efs_tag_result.tags["lowercase spaced"] is defined
@@ -474,9 +474,9 @@
       efs_tag:
         state: absent
         resource: "{{ created_efs.efs.file_system_id }}"
-        aws_access_key: '{{ aws_access_key }}'
-        aws_secret_key: '{{ aws_secret_key }}'
-        security_token: '{{ security_token | default(omit) }}'
+        access_key: '{{ aws_access_key }}'
+        secret_key: '{{ aws_secret_key }}'
+        session_token: '{{ security_token | default(omit) }}'
         region: "{{ aws_region }}"
         tags:
           snake_case: 'simple_snake_case'
@@ -491,9 +491,9 @@
         state: absent
         resource: "{{ created_efs.efs.file_system_id }}"
         region: "{{ aws_region }}"
-        aws_access_key: '{{ aws_access_key }}'
-        aws_secret_key: '{{ aws_secret_key }}'
-        security_token: '{{ security_token | default(omit) }}'
+        access_key: '{{ aws_access_key }}'
+        secret_key: '{{ aws_secret_key }}'
+        session_token: '{{ security_token | default(omit) }}'
         tags: {}
         purge_tags: true
       register: efs_tag_result
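The efs hunks above repeatedly change that: "{{efs_result_assertions}}" to that: efs_result_assertions. The assert module accepts a list variable directly, so templating the list into a string is unnecessary and now triggers warnings. A minimal sketch of the pattern, with a hypothetical two-item assertion list standing in for the real one defined earlier in that target:

    - set_fact:
        efs_result_assertions:        # hypothetical abbreviated list
          - efs_result is not changed
          - efs_result.efs | length == 1

    - assert:
        that: efs_result_assertions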
diff --git a/ansible_collections/community/aws/tests/integration/targets/eks_cluster/tasks/full_test.yml b/ansible_collections/community/aws/tests/integration/targets/eks_cluster/tasks/full_test.yml
index e3aca2863..71cc1fc87 100644
--- a/ansible_collections/community/aws/tests/integration/targets/eks_cluster/tasks/full_test.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/eks_cluster/tasks/full_test.yml
@@ -4,12 +4,12 @@
 # If us-west-1 does become supported, change this test to use an unsupported region
 # or if all regions are supported, delete this test
 - name: attempt to use eks in unsupported region
-  aws_eks_cluster:
+  eks_cluster:
     name: "{{ eks_cluster_name }}"
     state: absent
-    aws_access_key: "{{ aws_access_key }}"
-    aws_secret_key: "{{ aws_secret_key }}"
-    security_token: "{{ security_token }}"
+    access_key: "{{ aws_access_key }}"
+    secret_key: "{{ aws_secret_key }}"
+    session_token: "{{ security_token | default(omit) }}"
     region: us-west-1
   register: aws_eks_unsupported_region
   ignore_errors: yes
@@ -21,7 +21,7 @@
     - '"msg" in aws_eks_unsupported_region'
 
 - name: delete an as yet non-existent EKS cluster
-  aws_eks_cluster:
+  eks_cluster:
     name: "{{ eks_cluster_name }}"
     state: absent
   register: aws_eks_delete_non_existent
@@ -64,7 +64,7 @@
     - "{{ eks_subnets }}"
 
 - name: create security groups to use for EKS
-  ec2_group:
+  ec2_security_group:
     name: "{{ item.name }}"
     description: "{{ item.description }}"
     state: present
@@ -75,7 +75,7 @@
   register: setup_security_groups
 
 - name: create EKS cluster
-  aws_eks_cluster:
+  eks_cluster:
     name: "{{ eks_cluster_name }}"
     security_groups: "{{ eks_security_groups | map(attribute='name') }}"
     subnets: "{{ setup_subnets.results | map(attribute='subnet.id') }}"
@@ -93,7 +93,7 @@
     - eks_create.tags.another == "foobar"
 
 - name: create EKS cluster with same details but wait for it to become active
-  aws_eks_cluster:
+  eks_cluster:
     name: "{{ eks_cluster_name }}"
     security_groups: "{{ eks_security_groups | map(attribute='name') }}"
     subnets: "{{ setup_subnets.results | map(attribute='subnet.id') }}"
@@ -113,7 +113,7 @@
     - eks_create.endpoint != ""
 
 - name: create EKS cluster with same details but using SG ids
-  aws_eks_cluster:
+  eks_cluster:
     name: "{{ eks_cluster_name }}"
     security_groups: "{{ setup_security_groups.results | map(attribute='group_id') }}"
     subnets: "{{ setup_subnets.results | map(attribute='subnet.id') }}"
@@ -127,7 +127,7 @@
     - eks_create.name == eks_cluster_name
 
 - name: remove EKS cluster, waiting until complete
-  aws_eks_cluster:
+  eks_cluster:
     name: "{{ eks_cluster_name }}"
     state: absent
     wait: yes
@@ -139,7 +139,7 @@
     - eks_delete is changed
 
 - name: create EKS cluster with same details but wait for it to become active
-  aws_eks_cluster:
+  eks_cluster:
     name: "{{ eks_cluster_name }}"
     security_groups: "{{ eks_security_groups | map(attribute='name') }}"
     subnets: "{{ setup_subnets.results | map(attribute='subnet.id') }}"
@@ -154,7 +154,7 @@
     - eks_create.name == eks_cluster_name
 
 - name: remove EKS cluster, without waiting this time
-  aws_eks_cluster:
+  eks_cluster:
     name: "{{ eks_cluster_name }}"
     state: absent
   register: eks_delete
@@ -165,7 +165,7 @@
     - eks_delete is changed
 
 - name: create EKS cluster with short name
-  aws_eks_cluster:
+  eks_cluster:
     name: "{{ eks_cluster_short_name }}"
     security_groups: "{{ eks_security_groups | map(attribute='name') }}"
     subnets: "{{ setup_subnets.results | map(attribute='subnet.id') }}"
@@ -180,7 +180,7 @@
     - eks_create is not failed
 
 - name: remove EKS cluster with short name
-  aws_eks_cluster:
+  eks_cluster:
     name: "{{ eks_cluster_short_name }}"
     state: absent
     wait: yes
@@ -192,7 +192,7 @@
     msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"
COMMENCE TEARDOWN *****" - name: remove EKS cluster - aws_eks_cluster: + eks_cluster: name: "{{ eks_cluster_name }}" state: absent wait: yes @@ -200,7 +200,7 @@ ignore_errors: yes - name: remove EKS cluster - aws_eks_cluster: + eks_cluster: name: "{{ eks_cluster_short_name }}" state: absent wait: yes @@ -216,7 +216,7 @@ - name: "{{ eks_cluster_name }}-workers-sg" - name: set all security group rule lists to empty to remove circular dependency - ec2_group: + ec2_security_group: name: "{{ item.name }}" description: "{{ item.description }}" state: present @@ -229,7 +229,7 @@ ignore_errors: yes - name: remove security groups - ec2_group: + ec2_security_group: name: '{{ item.name }}' state: absent vpc_id: '{{ setup_vpc.vpc.id }}' diff --git a/ansible_collections/community/aws/tests/integration/targets/eks_cluster/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/eks_cluster/tasks/main.yml index 61aa32cd1..0f414f56f 100644 --- a/ansible_collections/community/aws/tests/integration/targets/eks_cluster/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/eks_cluster/tasks/main.yml @@ -4,9 +4,9 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: - include_tasks: full_test.yml diff --git a/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/cleanup_eks_cluster.yml b/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/cleanup_eks_cluster.yml index d30761fa3..21adb30a8 100644 --- a/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/cleanup_eks_cluster.yml +++ b/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/cleanup_eks_cluster.yml @@ -5,7 +5,7 @@ ignore_errors: true - name: remove EKS cluster - aws_eks_cluster: + eks_cluster: name: '{{ eks_cluster_name }}' state: absent wait: 'yes' @@ -17,7 +17,7 @@ - name: '{{ eks_cluster_name }}-workers-sg' - name: set all security group rule lists to empty to remove circular dependency - ec2_group: + ec2_security_group: name: '{{ item.name }}' description: '{{ item.description }}' state: present @@ -30,7 +30,7 @@ ignore_errors: 'yes' - name: remove security groups - ec2_group: + ec2_security_group: name: '{{ item.name }}' state: absent vpc_id: '{{ setup_vpc.vpc.id }}' diff --git a/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml b/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml index d5affa5b5..48fbbef80 100644 --- a/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml +++ b/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml @@ -72,7 +72,7 @@ register: nat_route_table - name: create security groups to use for EKS - ec2_group: + ec2_security_group: name: '{{ item.name }}' description: '{{ item.description }}' state: present @@ -83,7 +83,7 @@ register: setup_security_groups - name: create EKS cluster - aws_eks_cluster: + eks_cluster: name: '{{ eks_cluster_name }}' security_groups: '{{ eks_security_groups | map(attribute=''name'') }}' subnets: '{{ 
diff --git a/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml b/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml
index d5affa5b5..48fbbef80 100644
--- a/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml
@@ -72,7 +72,7 @@
   register: nat_route_table
 
 - name: create security groups to use for EKS
-  ec2_group:
+  ec2_security_group:
     name: '{{ item.name }}'
     description: '{{ item.description }}'
     state: present
@@ -83,7 +83,7 @@
   register: setup_security_groups
 
 - name: create EKS cluster
-  aws_eks_cluster:
+  eks_cluster:
     name: '{{ eks_cluster_name }}'
     security_groups: '{{ eks_security_groups | map(attribute=''name'') }}'
     subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
@@ -94,4 +94,4 @@
 - name: check that EKS cluster was created
   assert:
     that:
-      - eks_create.name == eks_cluster_name
\ No newline at end of file
+      - eks_create.name == eks_cluster_name
diff --git a/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/main.yaml b/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/main.yaml
index 77298dc81..d6606e3db 100644
--- a/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/main.yaml
+++ b/ansible_collections/community/aws/tests/integration/targets/eks_fargate_profile/tasks/main.yaml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:
     - include_tasks: create_eks_cluster.yml
diff --git a/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/aliases b/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/aliases
index 0b84301d7..1809e989b 100644
--- a/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/aliases
+++ b/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/aliases
@@ -1 +1,2 @@
-cloud/aws
\ No newline at end of file
+cloud/aws
+time=30m
diff --git a/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml b/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml
index ff841f0f5..8bdb5bad4 100644
--- a/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml
@@ -5,7 +5,7 @@
   ignore_errors: yes
 
 - name: remove EKS cluster
-  aws_eks_cluster:
+  eks_cluster:
     name: '{{ eks_cluster_name }}'
     state: absent
     wait: 'yes'
@@ -17,7 +17,7 @@
     - name: '{{ eks_cluster_name }}-workers-sg'
 
 - name: set all security group rule lists to empty to remove circular dependency
-  ec2_group:
+  ec2_security_group:
     name: '{{ item.name }}'
     description: '{{ item.description }}'
     state: present
@@ -30,7 +30,7 @@
   ignore_errors: 'yes'
 
 - name: remove security groups
-  ec2_group:
+  ec2_security_group:
     name: '{{ item.name }}'
     state: absent
     vpc_id: '{{ setup_vpc.vpc.id }}'
@@ -74,10 +74,10 @@
     state: absent
     vpc_id: '{{ setup_vpc.vpc.id}}'
   ignore_errors: 'yes'
-  
+
 - name: remove setup VPC
   ec2_vpc_net:
     cidr_block: 10.0.0.0/16
     state: absent
     name: '{{ resource_prefix }}_aws_eks'
-  ignore_errors: 'yes'
\ No newline at end of file
+  ignore_errors: 'yes'
diff --git a/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml b/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml
index dd6efd27a..882d45dd7 100644
--- a/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml
@@ -2,7 +2,7 @@
 # This space was a copy by aws_eks_cluster integration test
 - name: ensure IAM instance role exists
   iam_role:
-    name: ansible-test-eks_cluster_role
+    name: ansible-test-{{ tiny_prefix }}-eks_nodegroup-cluster
     assume_role_policy_document: '{{ lookup(''file'',''eks-trust-policy.json'') }}'
     state: present
     create_instance_profile: 'no'
@@ -44,7 +44,7 @@
   community.aws.ec2_vpc_route_table:
     vpc_id: '{{ setup_vpc.vpc.id }}'
     tags:
-      Name: EKS
+      Name: "EKS-ng-{{ tiny_prefix }}"
     subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
     routes:
       - dest: 0.0.0.0/0
@@ -52,7 +52,7 @@
   register: public_route_table
 
 - name: create security groups to use for EKS
-  ec2_group:
+  ec2_security_group:
     name: '{{ item.name }}'
     description: '{{ item.description }}'
     state: present
@@ -63,7 +63,7 @@
   register: setup_security_groups
 
 - name: create EKS cluster
-  aws_eks_cluster:
+  eks_cluster:
     name: '{{ eks_cluster_name }}'
     security_groups: '{{ eks_security_groups | map(attribute=''name'') }}'
     subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
@@ -77,9 +77,9 @@
     - eks_create.name == eks_cluster_name
 
 # Dependecies to eks nodegroup
-- name: create IAM instance role 
+- name: create IAM instance role
   iam_role:
-    name: 'ansible-test-eks_nodegroup'
+    name: 'ansible-test-{{ tiny_prefix }}-eks_nodegroup-ng'
     assume_role_policy_document: '{{ lookup(''file'',''eks-nodegroup-trust-policy.json'') }}'
     state: present
     create_instance_profile: no
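The dependecies.yml hunks above embed tiny_prefix into the IAM role names and route-table tag so that concurrent CI runs no longer collide in the account-wide IAM namespace. A sketch of the pattern as used there (the policy file name mirrors these tests):

    - name: ensure IAM instance role exists
      iam_role:
        name: "ansible-test-{{ tiny_prefix }}-eks_nodegroup-cluster"
        assume_role_policy_document: "{{ lookup('file', 'eks-trust-policy.json') }}"
        create_instance_profile: no
        state: present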
\ No newline at end of file + - eks_nodegroup_result is not changed + +- name: wait for deletion of name_a nodegroup (idempotency) + eks_nodegroup: + name: '{{ eks_nodegroup_name_a }}' + state: absent + cluster_name: '{{ eks_cluster_name }}' + wait: True + register: eks_nodegroup_result + +- name: check that eks_nodegroup is not changed (idempotency) + assert: + that: + - eks_nodegroup_result is not changed diff --git a/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/main.yml index 9f896bec6..5c1a76f57 100644 --- a/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/eks_nodegroup/tasks/main.yml @@ -5,9 +5,9 @@ - amozon.community module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: - include_tasks: dependecies.yml diff --git a/ansible_collections/community/aws/tests/integration/targets/elasticache/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/elasticache/tasks/main.yml index 31ae3d9cf..9664a70f1 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elasticache/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elasticache/tasks/main.yml @@ -3,9 +3,9 @@ - name: Integration testing for the elasticache module module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' collections: - amazon.aws @@ -60,11 +60,11 @@ that: - elasticache_redis is changed - elasticache_redis.elasticache.data is defined - - elasticache_redis.elasticache.name == "{{ elasticache_redis_test_name }}" - - elasticache_redis.elasticache.data.CacheSubnetGroupName == "{{ elasticache_subnet_group_name }}" + - elasticache_redis.elasticache.name == elasticache_redis_test_name + - elasticache_redis.elasticache.data.CacheSubnetGroupName == elasticache_subnet_group_name - name: Add security group for Redis access in Elasticache - ec2_group: + ec2_security_group: name: "{{ elasticache_redis_sg_name }}" description: Allow access to Elasticache Redis for testing EC module vpc_id: "{{ elasticache_vpc.vpc.id }}" @@ -186,7 +186,7 @@ state: absent - name: Make sure Redis Security Group is deleted again - ec2_group: + ec2_security_group: name: "{{ elasticache_redis_sg_name }}" state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/elasticache_subnet_group/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/elasticache_subnet_group/tasks/main.yml index 5814f9dc9..921a37eb0 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elasticache_subnet_group/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elasticache_subnet_group/tasks/main.yml @@ -8,9 +8,9 @@ # - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | 
default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: diff --git a/ansible_collections/community/aws/tests/integration/targets/elasticbeanstalk_app/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/elasticbeanstalk_app/tasks/main.yml index d90a7ce8d..e1deb9df9 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elasticbeanstalk_app/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elasticbeanstalk_app/tasks/main.yml @@ -4,15 +4,15 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: # ============================================================ - name: test with no parameters - aws_elasticbeanstalk_app: + elasticbeanstalk_app: register: result ignore_errors: true @@ -23,7 +23,7 @@ # ============================================================ - name: test create app - aws_elasticbeanstalk_app: + elasticbeanstalk_app: app_name: "{{ app_name }}" description: "{{ description }}" state: present @@ -36,7 +36,7 @@ # ============================================================ - name: test create when app already exists - aws_elasticbeanstalk_app: + elasticbeanstalk_app: app_name: "{{ app_name }}" description: "{{ description }}" state: present @@ -49,7 +49,7 @@ # ============================================================ - name: make an update to an existing app - aws_elasticbeanstalk_app: + elasticbeanstalk_app: app_name: "{{ app_name }}" description: "{{ alternate_description }}" state: present @@ -62,7 +62,7 @@ # # ============================================================ # - name: fail deleting an app that has environments that exist -# aws_elasticbeanstalk_app: +# elasticbeanstalk_app: # app_name: "non_app" # state: absent # register: result @@ -75,7 +75,7 @@ # # ============================================================ # - name: deleting an app that has environments that exist with terminate_by_force True -# aws_elasticbeanstalk_app: +# elasticbeanstalk_app: # app_name: "non_app" # state: absent # terminate_by_force: True @@ -98,7 +98,7 @@ # # ============================================================ # - name: deleting an app that has environments that exist with terminate_by_force True -# aws_elasticbeanstalk_app: +# elasticbeanstalk_app: # app_name: "non_app" # state: absent # terminate_by_force: True @@ -111,7 +111,7 @@ # # ============================================================ - name: delete non existent app - aws_elasticbeanstalk_app: + elasticbeanstalk_app: app_name: "non_app" state: absent register: result @@ -125,7 +125,7 @@ # ============================================================ - name: delete existing app - aws_elasticbeanstalk_app: + elasticbeanstalk_app: app_name: "{{ app_name }}" state: absent register: result @@ -140,6 +140,6 @@ always: - name: delete existing app - aws_elasticbeanstalk_app: + elasticbeanstalk_app: app_name: "{{ app_name }}" state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb_info/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb_info/tasks/main.yml 
index e4cd8144b..b09e88072 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb_info/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb_info/tasks/main.yml @@ -18,10 +18,10 @@ - module_defaults: group/aws: - region: "{{ ec2_region }}" - ec2_access_key: "{{ ec2_access_key }}" - ec2_secret_key: "{{ ec2_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" block: # ============================================================ @@ -32,8 +32,8 @@ name: "{{ elb_name }}" state: present zones: - - "{{ ec2_region }}a" - - "{{ ec2_region }}b" + - "{{ aws_region }}a" + - "{{ aws_region }}b" listeners: - protocol: http load_balancer_port: 80 @@ -55,8 +55,8 @@ that: - create is changed # We rely on these for the info test, make sure they're what we expect - - '"{{ ec2_region }}a" in create.elb.zones' - - '"{{ ec2_region }}b" in create.elb.zones' + - aws_region ~ 'a' in create.elb.zones + - aws_region ~ 'b' in create.elb.zones - create.elb.health_check.healthy_threshold == 10 - create.elb.health_check.interval == 30 - create.elb.health_check.target == "HTTP:80/index.html" @@ -74,8 +74,8 @@ that: - info.elbs|length == 1 - elb.availability_zones|length == 2 - - '"{{ ec2_region }}a" in elb.availability_zones' - - '"{{ ec2_region }}b" in elb.availability_zones' + - aws_region ~ 'a' in elb.availability_zones + - aws_region ~ 'b' in elb.availability_zones - elb.health_check.healthy_threshold == 10 - elb.health_check.interval == 30 - elb.health_check.target == "HTTP:80/index.html" @@ -115,7 +115,7 @@ name: "{{ elb_name }}" state: present zones: - - "{{ ec2_region }}c" + - "{{ aws_region }}c" listeners: - protocol: http load_balancer_port: 80 @@ -134,7 +134,7 @@ - assert: that: - update_az is changed - - update_az.elb.zones[0] == "{{ ec2_region }}c" + - update_az.elb.zones[0] == aws_region ~ 'c' - name: Get ELB info after changing AZ's elb_classic_lb_info: @@ -144,7 +144,7 @@ - assert: that: - elb.availability_zones|length == 1 - - '"{{ ec2_region }}c" in elb.availability_zones[0]' + - aws_region ~ 'c' in elb.availability_zones[0] vars: elb: "{{ info.elbs[0] }}" @@ -157,9 +157,9 @@ name: "{{ elb_name }}" state: present zones: - - "{{ ec2_region }}a" - - "{{ ec2_region }}b" - - "{{ ec2_region }}c" + - "{{ aws_region }}a" + - "{{ aws_region }}b" + - "{{ aws_region }}c" listeners: - protocol: http load_balancer_port: 80 @@ -170,9 +170,9 @@ - assert: that: - update_az is changed - - '"{{ ec2_region }}a" in update_az.elb.zones' - - '"{{ ec2_region }}b" in update_az.elb.zones' - - '"{{ ec2_region }}c" in update_az.elb.zones' + - aws_region ~ 'a' in update_az.elb.zones + - aws_region ~ 'b' in update_az.elb.zones + - aws_region ~ 'c' in update_az.elb.zones - name: Get ELB info after updating AZ's elb_classic_lb_info: @@ -182,9 +182,9 @@ - assert: that: - elb.availability_zones|length == 3 - - '"{{ ec2_region }}a" in elb.availability_zones' - - '"{{ ec2_region }}b" in elb.availability_zones' - - '"{{ ec2_region }}c" in elb.availability_zones' + - aws_region ~ 'a' in elb.availability_zones + - aws_region ~ 'b' in elb.availability_zones + - aws_region ~ 'c' in elb.availability_zones vars: elb: "{{ info.elbs[0] }}" @@ -197,9 +197,9 @@ name: "{{ elb_name }}" state: present zones: - - "{{ ec2_region }}a" - - "{{ ec2_region }}b" - - "{{ ec2_region }}c" + - "{{ aws_region }}a" 
+ - "{{ aws_region }}b" + - "{{ aws_region }}c" listeners: - protocol: http load_balancer_port: 80 @@ -235,9 +235,9 @@ name: "{{ elb_name }}" state: present zones: - - "{{ ec2_region }}a" - - "{{ ec2_region }}b" - - "{{ ec2_region }}c" + - "{{ aws_region }}a" + - "{{ aws_region }}b" + - "{{ aws_region }}c" listeners: - protocol: http load_balancer_port: 8081 diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/cleanup_instances.yml b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/cleanup_instances.yml index 7ae91ac00..262bc99b2 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/cleanup_instances.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/cleanup_instances.yml @@ -9,14 +9,14 @@ ignore_errors: true - name: Delete ASG - ec2_asg: + autoscaling_group: name: '{{ asg_name }}' state: absent ignore_errors: true register: ec2_asg_a - name: Delete Launch Template - ec2_lc: + autoscaling_launch_config: name: '{{ lc_name }}' state: absent ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/cleanup_vpc.yml b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/cleanup_vpc.yml index 9abeb74a2..754b685f6 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/cleanup_vpc.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/cleanup_vpc.yml @@ -1,6 +1,6 @@ --- - name: delete security groups - ec2_group: + ec2_security_group: name: '{{ item }}' state: absent ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/main.yml index 247b6f6b6..3ab9be64d 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/main.yml @@ -2,9 +2,9 @@ - module_defaults: group/aws: region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" collections: - community.aws - amazon.aws diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/manage_asgs.yml b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/manage_asgs.yml index f0e9db601..ea726b8fe 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/manage_asgs.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/manage_asgs.yml @@ -1,6 +1,6 @@ --- - name: Get ASG info - ec2_asg_info: + autoscaling_group_info: name: "{{ asg_name }}$" register: asg_info diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/setup_instances.yml b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/setup_instances.yml index b89b38d20..455a9886b 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/setup_instances.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/setup_instances.yml @@ -25,7 +25,7 @@ instance_b: "{{ 
ec2_instance_b.instance_ids[0] }}" - name: Create a Launch Template - ec2_lc: + autoscaling_launch_config: name: "{{ lc_name }}" image_id: "{{ ec2_ami_id }}" security_groups: "{{ sg_a }}" @@ -34,7 +34,7 @@ register: ec2_lc_a - name: Create an ASG - ec2_asg: + autoscaling_group: name: "{{ asg_name }}" load_balancers: - "{{ elb_name_1 }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/setup_vpc.yml b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/setup_vpc.yml index 26fafa41c..60c85b8eb 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/setup_vpc.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_instance/tasks/setup_vpc.yml @@ -32,7 +32,7 @@ register: setup_subnet_2 - name: create a security group - ec2_group: + ec2_security_group: name: '{{ sg_name_1 }}' description: 'created by Ansible integration tests' state: present @@ -45,7 +45,7 @@ register: setup_sg_1 - name: create a security group - ec2_group: + ec2_security_group: name: '{{ sg_name_2 }}' description: 'created by Ansible integration tests' state: present diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/main.yml index cf0a13ec4..e277fffd7 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/main.yml @@ -4,9 +4,9 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: @@ -89,7 +89,7 @@ gateway_id: "{{ igw.gateway_id }}" register: route_table - - ec2_group: + - ec2_security_group: name: "{{ resource_prefix }}" description: "security group for Ansible NLB integration tests" state: present @@ -173,7 +173,7 @@ ignore_errors: yes - name: destroy sec group - ec2_group: + ec2_security_group: name: "{{ sec_group.group_name }}" description: "security group for Ansible NLB integration tests" state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml b/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml index b55a0777f..f1e920de8 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml @@ -34,7 +34,7 @@ - assert: that: - nlb.changed - - 'nlb.tags.created_by == "NLB test {{ resource_prefix }}"' + - nlb.tags.created_by == 'NLB test ' ~ resource_prefix - name: test tags are not removed if unspecified elb_network_lb: @@ -46,7 +46,7 @@ - assert: that: - not nlb.changed - - 'nlb.tags.created_by == "NLB test {{ resource_prefix }}"' + - nlb.tags.created_by == 'NLB test ' ~ resource_prefix - name: remove tags from NLB elb_network_lb: diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml b/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml index 06fab22b5..295e5e469 100644 --- 
a/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml @@ -1,17 +1,17 @@ - block: # create instances - - ec2_asg: + - autoscaling_group: state: absent name: "{{ resource_prefix }}-webservers" wait_timeout: 900 - - ec2_lc: + - autoscaling_launch_config: name: "{{ resource_prefix }}-web-lcfg" state: absent - name: Create launch config for testing - ec2_lc: + autoscaling_launch_config: name: "{{ resource_prefix }}-web-lcfg" assign_public_ip: true image_id: "{{ ec2_ami_id }}" @@ -31,7 +31,7 @@ delete_on_termination: true - name: Create autoscaling group for app server fleet - ec2_asg: + autoscaling_group: name: "{{ resource_prefix }}-webservers" vpc_zone_identifier: "{{ nlb_subnets }}" launch_config_name: "{{ resource_prefix }}-web-lcfg" @@ -50,13 +50,13 @@ always: - - ec2_asg: + - autoscaling_group: state: absent name: "{{ resource_prefix }}-webservers" wait_timeout: 900 ignore_errors: yes - - ec2_lc: + - autoscaling_launch_config: name: "{{ resource_prefix }}-web-lcfg" state: absent ignore_errors: yes diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_target/files/ansible_lambda_target.py b/ansible_collections/community/aws/tests/integration/targets/elb_target/files/ansible_lambda_target.py index 3ea22472e..d652d6097 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_target/files/ansible_lambda_target.py +++ b/ansible_collections/community/aws/tests/integration/targets/elb_target/files/ansible_lambda_target.py @@ -1,10 +1,10 @@ -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type import json def lambda_handler(event, context): - return { - 'statusCode': 200, - 'body': json.dumps('Hello from Lambda!') - } + return {"statusCode": 200, "body": json.dumps("Hello from Lambda!")} diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/alb_target.yml b/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/alb_target.yml index d3638a63c..446b59031 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/alb_target.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/alb_target.yml @@ -51,7 +51,7 @@ register: route_table - name: create testing security group - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ vpc.vpc.id }}" @@ -177,7 +177,7 @@ ignore_errors: true - name: remove testing security group - ec2_group: + ec2_security_group: state: absent name: "{{ resource_prefix }}-sg" register: removed diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/ec2_target.yml b/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/ec2_target.yml index 611aca26f..20931f1d7 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/ec2_target.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/ec2_target.yml @@ -58,7 +58,7 @@ register: route_table - name: create testing security group - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ vpc.vpc.id }}" @@ -147,7 
+147,7 @@ - result.health_check_protocol == 'TCP' - '"tags" in result' - '"target_group_arn" in result' - - result.target_group_name == "{{ tg_name }}-nlb" + - result.target_group_name == tg_name ~ '-nlb' - result.target_type == 'instance' - result.deregistration_delay_timeout_seconds == '60' - result.deregistration_delay_connection_termination_enabled @@ -214,7 +214,7 @@ - '"load_balancer_arn" in result' - '"tags" in result' - result.type == 'network' - - result.vpc_id == '{{ vpc.vpc.id }}' + - result.vpc_id == vpc.vpc.id - name: modify up testing target group for NLB (preserve_client_ip_enabled=false) elb_target_group: @@ -603,7 +603,7 @@ ignore_errors: true - name: remove testing security group - ec2_group: + ec2_security_group: state: absent name: "{{ resource_prefix }}-sg" register: removed diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/main.yml index e99118c64..8f03edfa8 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_target/tasks/main.yml @@ -2,9 +2,9 @@ - name: set up elb_target test prerequisites module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" collections: - community.general diff --git a/ansible_collections/community/aws/tests/integration/targets/elb_target_info/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/elb_target_info/tasks/main.yml index fc11cdbcd..fadce2135 100644 --- a/ansible_collections/community/aws/tests/integration/targets/elb_target_info/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/elb_target_info/tasks/main.yml @@ -2,9 +2,9 @@ - name: set up elb_target_info test prerequisites module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" collections: - amazon.aws @@ -66,7 +66,7 @@ register: route_table - name: create testing security group - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ vpc.vpc.id }}" @@ -207,9 +207,9 @@ - assert: that: - - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" - - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" - - "{{ idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - "alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))" + - "nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))" + - "idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn'))" - (target_facts.instance_target_groups | length) == 2 msg: "target facts showed the target in 
the right target groups" @@ -228,9 +228,9 @@ - assert: that: - - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" - - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" - - "{{ idle_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - "alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))" + - "nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))" + - "idle_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))" - (target_facts.instance_target_groups | length) == 3 msg: "target facts reflected the addition of the target to the idle group" @@ -242,9 +242,9 @@ - assert: that: - - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" - - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" - - "{{ idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}" + - "alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))" + - "nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))" + - "idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn'))" - (target_facts.instance_target_groups | length) == 2 msg: "target_facts.instance_target_groups did not gather unused target groups when variable was set" @@ -407,7 +407,7 @@ ignore_errors: true - name: remove testing security group - ec2_group: + ec2_security_group: state: absent name: "{{ resource_prefix }}-sg" description: a security group for ansible tests diff --git a/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/main.yml index 837f9bd17..c11b297af 100644 --- a/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/main.yml @@ -4,9 +4,9 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - include_tasks: test_connection_network.yml diff --git a/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/test_connection_jdbc.yml b/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/test_connection_jdbc.yml index 966d8156f..a3b052ba9 100644 --- a/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/test_connection_jdbc.yml +++ b/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/test_connection_jdbc.yml @@ -5,7 +5,7 @@ # TODO: description, match_criteria, security_groups, and subnet_id are unused module options - name: create glue connection - aws_glue_connection: + glue_connection: name: "{{ 
resource_prefix }}" connection_properties: JDBC_CONNECTION_URL: "jdbc:mysql://mydb:3306/{{ resource_prefix }}" @@ -19,7 +19,7 @@ - result.changed - name: test idempotence creating glue connection - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" connection_properties: JDBC_CONNECTION_URL: "jdbc:mysql://mydb:3306/{{ resource_prefix }}" @@ -33,7 +33,7 @@ - not result.changed - name: test updating JDBC connection url - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" connection_properties: JDBC_CONNECTION_URL: "jdbc:mysql://mydb:3306/{{ resource_prefix }}-updated" @@ -47,7 +47,7 @@ - result.changed - name: delete glue connection - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" state: absent register: result @@ -57,7 +57,7 @@ - result.changed - name: test idempotence removing glue connection - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" state: absent register: result @@ -69,6 +69,6 @@ always: - name: delete glue connection - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/test_connection_network.yml b/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/test_connection_network.yml index 230015585..bc7d5cb4c 100644 --- a/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/test_connection_network.yml +++ b/ansible_collections/community/aws/tests/integration/targets/glue_connection/tasks/test_connection_network.yml @@ -26,7 +26,7 @@ register: glue_subnet_a - name: Create security group 1 - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg-glue-1" description: A security group for Ansible tests vpc_id: "{{ glue_vpc.vpc.id }}" @@ -37,7 +37,7 @@ rule_desc: Connections from Glue - name: Create security group 2 - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg-glue-2" description: A security group for Ansible tests vpc_id: "{{ glue_vpc.vpc.id }}" @@ -48,7 +48,7 @@ rule_desc: Connections from Glue - name: Create Glue connection (check mode) - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" availability_zone: "{{ aws_region }}a" connection_properties: @@ -69,7 +69,7 @@ - glue_connection_check.description is not defined - name: Create Glue connection - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" availability_zone: "{{ aws_region }}a" connection_properties: @@ -109,7 +109,7 @@ - glue_connection.raw_connection_properties == connection_info["Connection"]["ConnectionProperties"] - name: Create Glue connection (idempotent) (check mode) - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" availability_zone: "{{ aws_region }}a" connection_properties: @@ -149,7 +149,7 @@ - connection_info_idempotent_check["Connection"]["PhysicalConnectionRequirements"]["AvailabilityZone"] == connection_info["Connection"]["PhysicalConnectionRequirements"]["AvailabilityZone"] - name: Create Glue connection (idempotent) - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" availability_zone: "{{ aws_region }}a" connection_properties: @@ -188,7 +188,7 @@ - connection_info_idempotent["Connection"]["PhysicalConnectionRequirements"]["AvailabilityZone"] == connection_info["Connection"]["PhysicalConnectionRequirements"]["AvailabilityZone"] - name: Update Glue connection (check mode) - aws_glue_connection: + glue_connection: name: "{{ 
resource_prefix }}" availability_zone: "{{ aws_region }}a" connection_properties: @@ -229,7 +229,7 @@ - glue_connection_update_check.raw_connection_properties == connection_info_update_check["Connection"]["ConnectionProperties"] - name: Update Glue connection - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" availability_zone: "{{ aws_region }}a" connection_properties: @@ -269,7 +269,7 @@ - glue_connection_update.raw_connection_properties == connection_info_update["Connection"]["ConnectionProperties"] - name: Delete Glue connection (check mode) - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" state: absent check_mode: true @@ -295,7 +295,7 @@ - connection_info["Connection"]["Name"] == connection_info_delete_check["Connection"]["Name"] - name: Delete Glue connection - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" state: absent register: glue_connection_delete @@ -307,17 +307,17 @@ always: - name: Delete Glue connection - aws_glue_connection: + glue_connection: name: "{{ resource_prefix }}" state: absent ignore_errors: true - name: Delete security group 1 - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg-glue-1" state: absent ignore_errors: true - name: Delete security group 2 - ec2_group: + ec2_security_group: name: "{{ resource_prefix }}-sg-glue-2" state: absent ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/glue_crawler/aliases b/ansible_collections/community/aws/tests/integration/targets/glue_crawler/aliases index 4ef4b2067..21fa9fd98 100644 --- a/ansible_collections/community/aws/tests/integration/targets/glue_crawler/aliases +++ b/ansible_collections/community/aws/tests/integration/targets/glue_crawler/aliases @@ -1 +1,4 @@ cloud/aws + +disabled +# https://github.com/ansible-collections/community.aws/issues/1796 diff --git a/ansible_collections/community/aws/tests/integration/targets/glue_crawler/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/glue_crawler/tasks/main.yml index b96968195..82ff4addf 100644 --- a/ansible_collections/community/aws/tests/integration/targets/glue_crawler/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/glue_crawler/tasks/main.yml @@ -4,9 +4,9 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: @@ -29,7 +29,7 @@ seconds: 10 - name: Create Glue crawler (check mode) - aws_glue_crawler: + glue_crawler: name: "{{ glue_crawler_name }}" database_name: my_database description: "{{ glue_crawler_description }}" @@ -56,7 +56,7 @@ - glue_crawler_check.description is not defined - name: Create Glue crawler - aws_glue_crawler: + glue_crawler: name: "{{ glue_crawler_name }}" database_name: my_database description: "{{ glue_crawler_description }}" @@ -102,7 +102,7 @@ - glue_crawler.targets.S3Targets == crawler_info["Crawler"]["Targets"]["S3Targets"] - name: Create Glue crawler (idempotent) (check mode) - aws_glue_crawler: + glue_crawler: name: "{{ glue_crawler_name }}" database_name: my_database description: "{{ glue_crawler_description }}" @@ -149,7 +149,7 @@ - crawler_info["Crawler"]["Targets"]["S3Targets"] == crawler_info_idempotent_check["Crawler"]["Targets"]["S3Targets"] - name: Create Glue 
crawler (idempotent) - aws_glue_crawler: + glue_crawler: name: "{{ glue_crawler_name }}" database_name: my_database description: "{{ glue_crawler_description }}" @@ -195,7 +195,7 @@ - crawler_info["Crawler"]["Targets"]["S3Targets"] == crawler_info_idempotent["Crawler"]["Targets"]["S3Targets"] - name: Update Glue crawler (check mode) - aws_glue_crawler: + glue_crawler: name: "{{ glue_crawler_name }}" database_name: my_database_2 description: "{{ glue_crawler_description }}" @@ -242,7 +242,7 @@ - glue_crawler_update_check.targets.S3Targets == crawler_info_update_check["Crawler"]["Targets"]["S3Targets"] - name: Update Glue crawler - aws_glue_crawler: + glue_crawler: name: "{{ glue_crawler_name }}" database_name: my_database_2 description: "{{ glue_crawler_description }}" @@ -288,7 +288,7 @@ - glue_crawler_update.targets.S3Targets == crawler_info_update["Crawler"]["Targets"]["S3Targets"] - name: Delete Glue crawler (check mode) - aws_glue_crawler: + glue_crawler: name: "{{ glue_crawler_name }}" state: absent check_mode: true @@ -315,7 +315,7 @@ - crawler_info["Crawler"]["Name"] == crawler_info_delete_check["Crawler"]["Name"] - name: Delete Glue crawler - aws_glue_crawler: + glue_crawler: name: "{{ glue_crawler_name }}" state: absent register: glue_crawler_delete @@ -327,7 +327,7 @@ always: - name: Delete Glue crawler - aws_glue_crawler: + glue_crawler: name: "{{ glue_crawler_name }}" state: absent ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/glue_job/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/glue_job/tasks/main.yml index 307a9befb..85080fd02 100644 --- a/ansible_collections/community/aws/tests/integration/targets/glue_job/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/glue_job/tasks/main.yml @@ -4,9 +4,9 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: # AWS CLI is needed until there's a module to get info about Glue jobs @@ -30,7 +30,7 @@ - "arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess" - name: Create Glue job (check mode) - aws_glue_job: + glue_job: name: "{{ glue_job_name }}" command_python_version: 3 command_script_location: "{{ glue_job_command_script_location }}" @@ -53,7 +53,7 @@ - glue_job_check.description is not defined - name: Create Glue job - aws_glue_job: + glue_job: name: "{{ glue_job_name }}" command_python_version: 3 command_script_location: "{{ glue_job_command_script_location }}" @@ -93,7 +93,7 @@ - glue_job.role == job_info["Job"]["Role"] - name: Create Glue job (idempotent) (check mode) - aws_glue_job: + glue_job: name: "{{ glue_job_name }}" command_python_version: 3 command_script_location: "{{ glue_job_command_script_location }}" @@ -135,7 +135,7 @@ - job_info["Job"]["Role"] == job_info_idempotent_check["Job"]["Role"] - name: Create Glue job (idempotent) - aws_glue_job: + glue_job: name: "{{ glue_job_name }}" command_python_version: 3 command_script_location: "{{ glue_job_command_script_location }}" @@ -176,7 +176,7 @@ - job_info["Job"]["Role"] == job_info_idempotent["Job"]["Role"] - name: Update Glue job (check mode) - aws_glue_job: + glue_job: name: "{{ glue_job_name }}" command_python_version: 2 command_script_location: "{{ glue_job_command_script_location 
}}" @@ -216,7 +216,7 @@ - glue_job_update_check.role == job_info_update_check["Job"]["Role"] - name: Update Glue job - aws_glue_job: + glue_job: name: "{{ glue_job_name }}" command_python_version: 2 command_script_location: "{{ glue_job_command_script_location }}" @@ -255,7 +255,7 @@ - glue_job_update.role == job_info_update["Job"]["Role"] - name: Delete Glue job (check mode) - aws_glue_job: + glue_job: name: "{{ glue_job_name }}" state: absent check_mode: true @@ -281,7 +281,7 @@ - job_info["Job"]["Name"] == job_info_delete_check["Job"]["Name"] - name: Delete Glue job - aws_glue_job: + glue_job: name: "{{ glue_job_name }}" state: absent register: glue_job_delete @@ -293,7 +293,7 @@ always: - name: Delete Glue job - aws_glue_job: + glue_job: name: "{{ glue_job_name }}" state: absent ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_access_key/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_access_key/defaults/main.yml deleted file mode 100644 index eaaa3523e..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_access_key/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -test_user: '{{ resource_prefix }}' diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_access_key/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_access_key/meta/main.yml deleted file mode 100644 index 32cf5dda7..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_access_key/meta/main.yml +++ /dev/null @@ -1 +0,0 @@ -dependencies: [] diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_access_key/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_access_key/tasks/main.yml deleted file mode 100644 index a7fcc633c..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_access_key/tasks/main.yml +++ /dev/null @@ -1,808 +0,0 @@ ---- -- name: AWS AuthN details - module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" - region: "{{ aws_region }}" - collections: - - amazon.aws - - community.aws - block: - # ================================================================================== - # Preparation - # ================================================================================== - # We create an IAM user with no attached permissions. 
The *only* thing the - # user will be able to do is call sts.get_caller_identity - # https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity.html - - name: Create test user - iam_user: - name: '{{ test_user }}' - state: present - register: iam_user - - - assert: - that: - - iam_user is successful - - iam_user is changed - - # ================================================================================== - - - name: Fetch IAM key info (no keys) - iam_access_key_info: - user_name: '{{ test_user }}' - register: access_key_info - - - assert: - that: - - access_key_info is successful - - '"access_keys" in access_key_info' - - access_key_info.access_keys | length == 0 - - # ================================================================================== - - - name: Create a key (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - state: present - register: create_key_1 - check_mode: true - - - assert: - that: - - create_key_1 is successful - - create_key_1 is changed - - - name: Create a key - iam_access_key: - user_name: '{{ test_user }}' - state: present - register: create_key_1 - - - assert: - that: - - create_key_1 is successful - - create_key_1 is changed - - '"access_key" in create_key_1' - - '"secret_access_key" in create_key_1' - - '"deleted_access_key_id" not in create_key_1' - - '"access_key_id" in create_key_1.access_key' - - '"create_date" in create_key_1.access_key' - - '"user_name" in create_key_1.access_key' - - '"status" in create_key_1.access_key' - - create_key_1.access_key.user_name == test_user - - create_key_1.access_key.status == 'Active' - - - name: Fetch IAM key info (1 key) - iam_access_key_info: - user_name: '{{ test_user }}' - register: access_key_info - - - assert: - that: - - access_key_info is successful - - '"access_keys" in access_key_info' - - access_key_info.access_keys | length == 1 - - '"access_key_id" in access_key_1' - - '"create_date" in access_key_1' - - '"user_name" in access_key_1' - - '"status" in access_key_1' - - access_key_1.user_name == test_user - - access_key_1.access_key_id == create_key_1.access_key.access_key_id - - access_key_1.create_date == create_key_1.access_key.create_date - - access_key_1.status == 'Active' - vars: - access_key_1: '{{ access_key_info.access_keys[0] }}' - - # ================================================================================== - - - name: Create a second key (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - state: present - register: create_key_2 - check_mode: true - - - assert: - that: - - create_key_2 is successful - - create_key_2 is changed - - - name: Create a second key - iam_access_key: - user_name: '{{ test_user }}' - state: present - register: create_key_2 - - - assert: - that: - - create_key_2 is successful - - create_key_2 is changed - - '"access_key" in create_key_2' - - '"secret_access_key" in create_key_2' - - '"deleted_access_key_id" not in create_key_2' - - '"access_key_id" in create_key_2.access_key' - - '"create_date" in create_key_2.access_key' - - '"user_name" in create_key_2.access_key' - - '"status" in create_key_2.access_key' - - create_key_2.access_key.user_name == test_user - - create_key_2.access_key.status == 'Active' - - - name: Fetch IAM key info (2 keys) - iam_access_key_info: - user_name: '{{ test_user }}' - register: access_key_info - - - assert: - that: - - access_key_info is successful - - '"access_keys" in access_key_info' - - access_key_info.access_keys | length == 2 - - '"access_key_id" in access_key_1' - - '"create_date" 
in access_key_1' - - '"user_name" in access_key_1' - - '"status" in access_key_1' - - access_key_1.user_name == test_user - - access_key_1.access_key_id == create_key_1.access_key.access_key_id - - access_key_1.create_date == create_key_1.access_key.create_date - - access_key_1.status == 'Active' - - '"access_key_id" in access_key_2' - - '"create_date" in access_key_2' - - '"user_name" in access_key_2' - - '"status" in access_key_2' - - access_key_2.user_name == test_user - - access_key_2.access_key_id == create_key_2.access_key.access_key_id - - access_key_2.create_date == create_key_2.access_key.create_date - - access_key_2.status == 'Active' - vars: - access_key_1: '{{ access_key_info.access_keys[0] }}' - access_key_2: '{{ access_key_info.access_keys[1] }}' - - # ================================================================================== - - # We don't block the attempt to create a third access key - should AWS change - # the limits this will "JustWork". - - # - name: Create a third key (check_mode) - # iam_access_key: - # user_name: '{{ test_user }}' - # state: present - # register: create_key_3 - # ignore_errors: True - # check_mode: true - - # - assert: - # that: - # - create_key_3 is successful - # - create_key_3 is changed - - - name: Create a third key without rotation - iam_access_key: - user_name: '{{ test_user }}' - state: present - register: create_key_3 - ignore_errors: True - - - assert: - that: - # If Amazon update the limits we may need to change the expectation here. - - create_key_3 is failed - - - name: Fetch IAM key info (2 keys - not changed) - iam_access_key_info: - user_name: '{{ test_user }}' - register: access_key_info - - - assert: - that: - - access_key_info is successful - - '"access_keys" in access_key_info' - - access_key_info.access_keys | length == 2 - - '"access_key_id" in access_key_1' - - '"create_date" in access_key_1' - - '"user_name" in access_key_1' - - '"status" in access_key_1' - - access_key_1.user_name == test_user - - access_key_1.access_key_id == create_key_1.access_key.access_key_id - - access_key_1.create_date == create_key_1.access_key.create_date - - access_key_1.status == 'Active' - - '"access_key_id" in access_key_2' - - '"create_date" in access_key_2' - - '"user_name" in access_key_2' - - '"status" in access_key_2' - - access_key_2.user_name == test_user - - access_key_2.access_key_id == create_key_2.access_key.access_key_id - - access_key_2.create_date == create_key_2.access_key.create_date - - access_key_2.status == 'Active' - vars: - access_key_1: '{{ access_key_info.access_keys[0] }}' - access_key_2: '{{ access_key_info.access_keys[1] }}' - - # ================================================================================== - - - name: Create a third key - rotation enabled (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - state: present - rotate_keys: true - register: create_key_3 - check_mode: true - - - assert: - that: - - create_key_3 is successful - - create_key_3 is changed - - '"deleted_access_key_id" in create_key_3' - - create_key_3.deleted_access_key_id == create_key_1.access_key.access_key_id - - - name: Create a second key - iam_access_key: - user_name: '{{ test_user }}' - state: present - rotate_keys: true - register: create_key_3 - - - assert: - that: - - create_key_3 is successful - - create_key_3 is changed - - '"access_key" in create_key_3' - - '"secret_access_key" in create_key_3' - - '"deleted_access_key_id" in create_key_3' - - create_key_3.deleted_access_key_id == 
create_key_1.access_key.access_key_id - - '"access_key_id" in create_key_3.access_key' - - '"create_date" in create_key_3.access_key' - - '"user_name" in create_key_3.access_key' - - '"status" in create_key_3.access_key' - - create_key_3.access_key.user_name == test_user - - create_key_3.access_key.status == 'Active' - - - name: Fetch IAM key info (2 keys - oldest rotated) - iam_access_key_info: - user_name: '{{ test_user }}' - register: access_key_info - - - assert: - that: - - access_key_info is successful - - '"access_keys" in access_key_info' - - access_key_info.access_keys | length == 2 - - '"access_key_id" in access_key_1' - - '"create_date" in access_key_1' - - '"user_name" in access_key_1' - - '"status" in access_key_1' - - access_key_1.user_name == test_user - - access_key_1.access_key_id == create_key_2.access_key.access_key_id - - access_key_1.create_date == create_key_2.access_key.create_date - - access_key_1.status == 'Active' - - '"access_key_id" in access_key_2' - - '"create_date" in access_key_2' - - '"user_name" in access_key_2' - - '"status" in access_key_2' - - access_key_2.user_name == test_user - - access_key_2.access_key_id == create_key_3.access_key.access_key_id - - access_key_2.create_date == create_key_3.access_key.create_date - - access_key_2.status == 'Active' - vars: - access_key_1: '{{ access_key_info.access_keys[0] }}' - access_key_2: '{{ access_key_info.access_keys[1] }}' - - # ================================================================================== - - - name: Disable third key (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - enabled: False - register: disable_key - check_mode: true - - - assert: - that: - - disable_key is successful - - disable_key is changed - - - name: Disable third key - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - enabled: False - register: disable_key - - - assert: - that: - - disable_key is successful - - disable_key is changed - - '"access_key" in disable_key' - - '"secret_access_key" not in disable_key' - - '"deleted_access_key_id" not in disable_key' - - '"access_key_id" in disable_key.access_key' - - '"create_date" in disable_key.access_key' - - '"user_name" in disable_key.access_key' - - '"status" in disable_key.access_key' - - disable_key.access_key.user_name == test_user - - disable_key.access_key.status == 'Inactive' - - - name: Disable third key - idempotency (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - enabled: False - register: disable_key - check_mode: true - - - assert: - that: - - disable_key is successful - - disable_key is not changed - - - name: Disable third key - idempotency - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - enabled: False - register: disable_key - - - assert: - that: - - disable_key is successful - - disable_key is not changed - - '"access_key" in disable_key' - - '"secret_access_key" not in disable_key' - - '"deleted_access_key_id" not in disable_key' - - '"access_key_id" in disable_key.access_key' - - '"create_date" in disable_key.access_key' - - '"user_name" in disable_key.access_key' - - '"status" in disable_key.access_key' - - disable_key.access_key.user_name == test_user - - disable_key.access_key.status == 'Inactive' - - - name: Fetch IAM key info (2 keys - 1 disabled) - iam_access_key_info: - user_name: '{{ test_user }}' - register: 
access_key_info - - - assert: - that: - - access_key_info is successful - - '"access_keys" in access_key_info' - - access_key_info.access_keys | length == 2 - - '"access_key_id" in access_key_1' - - '"create_date" in access_key_1' - - '"user_name" in access_key_1' - - '"status" in access_key_1' - - access_key_1.user_name == test_user - - access_key_1.access_key_id == create_key_2.access_key.access_key_id - - access_key_1.create_date == create_key_2.access_key.create_date - - access_key_1.status == 'Active' - - '"access_key_id" in access_key_2' - - '"create_date" in access_key_2' - - '"user_name" in access_key_2' - - '"status" in access_key_2' - - access_key_2.user_name == test_user - - access_key_2.access_key_id == create_key_3.access_key.access_key_id - - access_key_2.create_date == create_key_3.access_key.create_date - - access_key_2.status == 'Inactive' - vars: - access_key_1: '{{ access_key_info.access_keys[0] }}' - access_key_2: '{{ access_key_info.access_keys[1] }}' - - # ================================================================================== - - - name: Touch third key - no change (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - register: touch_key - check_mode: true - - - assert: - that: - - touch_key is successful - - touch_key is not changed - - - name: Touch third key - no change - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - register: touch_key - - - assert: - that: - - touch_key is successful - - touch_key is not changed - - '"access_key" in touch_key' - - '"secret_access_key" not in touch_key' - - '"deleted_access_key_id" not in touch_key' - - '"access_key_id" in touch_key.access_key' - - '"create_date" in touch_key.access_key' - - '"user_name" in touch_key.access_key' - - '"status" in touch_key.access_key' - - touch_key.access_key.user_name == test_user - - touch_key.access_key.status == 'Inactive' - - # ================================================================================== - - - name: Enable third key (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - enabled: True - register: enable_key - check_mode: true - - - assert: - that: - - enable_key is successful - - enable_key is changed - - - name: Enable third key - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - enabled: True - register: enable_key - - - assert: - that: - - enable_key is successful - - enable_key is changed - - '"access_key" in enable_key' - - '"secret_access_key" not in enable_key' - - '"deleted_access_key_id" not in enable_key' - - '"access_key_id" in enable_key.access_key' - - '"create_date" in enable_key.access_key' - - '"user_name" in enable_key.access_key' - - '"status" in enable_key.access_key' - - enable_key.access_key.user_name == test_user - - enable_key.access_key.status == 'Active' - - - name: Enable third key - idempotency (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - enabled: True - register: enable_key - check_mode: true - - - assert: - that: - - enable_key is successful - - enable_key is not changed - - - name: Enable third key - idempotency - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - enabled: True - register: enable_key - - - assert: - that: - - enable_key is successful - - enable_key is not changed - - 
'"access_key" in enable_key' - - '"secret_access_key" not in enable_key' - - '"deleted_access_key_id" not in enable_key' - - '"access_key_id" in enable_key.access_key' - - '"create_date" in enable_key.access_key' - - '"user_name" in enable_key.access_key' - - '"status" in enable_key.access_key' - - enable_key.access_key.user_name == test_user - - enable_key.access_key.status == 'Active' - - # ================================================================================== - - - name: Touch third key again - no change (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - register: touch_key - check_mode: true - - - assert: - that: - - touch_key is successful - - touch_key is not changed - - - name: Touch third key again - no change - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - register: touch_key - - - assert: - that: - - touch_key is successful - - touch_key is not changed - - '"access_key" in touch_key' - - '"secret_access_key" not in touch_key' - - '"deleted_access_key_id" not in touch_key' - - '"access_key_id" in touch_key.access_key' - - '"create_date" in touch_key.access_key' - - '"user_name" in touch_key.access_key' - - '"status" in touch_key.access_key' - - touch_key.access_key.user_name == test_user - - touch_key.access_key.status == 'Active' - - # ================================================================================== - - - name: Re-Disable third key - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - enabled: False - register: redisable_key - - - assert: - that: - - redisable_key is successful - - redisable_key is changed - - redisable_key.access_key.status == 'Inactive' - - - pause: - seconds: 10 - - # ================================================================================== - - - name: Test GetCallerIdentity - Key 2 - aws_caller_info: - aws_access_key: "{{ create_key_2.access_key.access_key_id }}" - aws_secret_key: "{{ create_key_2.secret_access_key }}" - security_token: "{{ omit }}" - register: caller_identity_2 - - - assert: - that: - - caller_identity_2 is successful - - caller_identity_2.arn == iam_user.iam_user.user.arn - - - name: Test GetCallerIdentity - Key 1 (gone) - aws_caller_info: - aws_access_key: "{{ create_key_1.access_key.access_key_id }}" - aws_secret_key: "{{ create_key_1.secret_access_key }}" - security_token: "{{ omit }}" - register: caller_identity_1 - ignore_errors: true - - - assert: - that: - - caller_identity_1 is failed - - caller_identity_1.error.code == 'InvalidClientTokenId' - - - name: Test GetCallerIdentity - Key 3 (disabled) - aws_caller_info: - aws_access_key: "{{ create_key_3.access_key.access_key_id }}" - aws_secret_key: "{{ create_key_3.secret_access_key }}" - security_token: "{{ omit }}" - register: caller_identity_3 - ignore_errors: true - - - assert: - that: - - caller_identity_3 is failed - - caller_identity_3.error.code == 'InvalidClientTokenId' - - # ================================================================================== - - - name: Delete active key (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_2.access_key.access_key_id }}' - state: absent - register: delete_active_key - check_mode: true - - - assert: - that: - - delete_active_key is successful - - delete_active_key is changed - - - name: Delete active key - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ 
create_key_2.access_key.access_key_id }}' - state: absent - register: delete_active_key - - - assert: - that: - - delete_active_key is successful - - delete_active_key is changed - - - name: Delete active key - idempotency (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_2.access_key.access_key_id }}' - state: absent - register: delete_active_key - check_mode: true - - - assert: - that: - - delete_active_key is successful - - delete_active_key is not changed - - - name: Delete active key - idempotency - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_2.access_key.access_key_id }}' - state: absent - register: delete_active_key - - - assert: - that: - - delete_active_key is successful - - delete_active_key is not changed - - # ================================================================================== - - - name: Delete inactive key (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - state: absent - register: delete_inactive_key - check_mode: true - - - assert: - that: - - delete_inactive_key is successful - - delete_inactive_key is changed - - - name: Delete inactive key - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - state: absent - register: delete_inactive_key - - - assert: - that: - - delete_inactive_key is successful - - delete_inactive_key is changed - - - name: Delete inactive key - idempotency (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - state: absent - register: delete_inactive_key - check_mode: true - - - assert: - that: - - delete_inactive_key is successful - - delete_inactive_key is not changed - - - name: Delete inactive key - idempotency - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_3.access_key.access_key_id }}' - state: absent - register: delete_inactive_key - - - assert: - that: - - delete_inactive_key is successful - - delete_inactive_key is not changed - - # ================================================================================== - - - name: Fetch IAM key info (no keys) - iam_access_key_info: - user_name: '{{ test_user }}' - register: access_key_info - - - assert: - that: - - access_key_info is successful - - '"access_keys" in access_key_info' - - access_key_info.access_keys | length == 0 - - # ================================================================================== - - - name: Create an inactive key (check_mode) - iam_access_key: - user_name: '{{ test_user }}' - state: present - enabled: false - register: create_key_4 - check_mode: true - - - assert: - that: - - create_key_4 is successful - - create_key_4 is changed - - - name: Create a key - iam_access_key: - user_name: '{{ test_user }}' - state: present - enabled: false - register: create_key_4 - - - assert: - that: - - create_key_4 is successful - - create_key_4 is changed - - '"access_key" in create_key_4' - - '"secret_access_key" in create_key_4' - - '"deleted_access_key_id" not in create_key_4' - - '"access_key_id" in create_key_4.access_key' - - '"create_date" in create_key_4.access_key' - - '"user_name" in create_key_4.access_key' - - '"status" in create_key_4.access_key' - - create_key_4.access_key.user_name == test_user - - create_key_4.access_key.status == 'Inactive' - - - name: Fetch IAM key info (1 inactive key) - iam_access_key_info: - user_name: '{{ test_user }}' - register: access_key_info - - - assert: - that: - - 
access_key_info is successful - - '"access_keys" in access_key_info' - - access_key_info.access_keys | length == 1 - - '"access_key_id" in access_key_1' - - '"create_date" in access_key_1' - - '"user_name" in access_key_1' - - '"status" in access_key_1' - - access_key_1.user_name == test_user - - access_key_1.access_key_id == create_key_4.access_key.access_key_id - - access_key_1.create_date == create_key_4.access_key.create_date - - access_key_1.status == 'Inactive' - vars: - access_key_1: '{{ access_key_info.access_keys[0] }}' - - # We already tested the idempotency of disabling keys; use this to verify that - # the key is disabled - - name: Disable new key - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_4.access_key.access_key_id }}' - enabled: False - register: disable_new_key - - - assert: - that: - - disable_new_key is successful - - disable_new_key is not changed - - '"access_key" in disable_new_key' - - # ================================================================================== - # Cleanup - - - name: Delete new key - iam_access_key: - user_name: '{{ test_user }}' - id: '{{ create_key_4.access_key.access_key_id }}' - state: absent - register: delete_new_key - - - assert: - that: - - delete_new_key is successful - - delete_new_key is changed - - - name: Remove test user - iam_user: - name: '{{ test_user }}' - state: absent - register: delete_user - - - assert: - that: - - delete_user is successful - - delete_user is changed - - always: - - - name: Remove test user - iam_user: - name: '{{ test_user }}' - state: absent - ignore_errors: yes
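# A minimal sketch, not from the upstream test suite, of the rotation cycle
# the tasks above exercise with iam_access_key: create a replacement key,
# verify callers work with it, then remove the old key. "my_app_user" and
# "old_key_id" are hypothetical placeholders:
- name: Create a replacement access key
  iam_access_key:
    user_name: my_app_user
    state: present
  register: new_key

- name: Retire the old key once consumers have switched to the new one
  iam_access_key:
    user_name: my_app_user
    id: '{{ old_key_id }}'
    state: absent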
diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_group/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_group/defaults/main.yml deleted file mode 100644 index f5112b1a4..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_group/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ --- -test_user: '{{ resource_prefix }}-user' -test_group: '{{ resource_prefix }}-group' diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_group/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_group/meta/main.yml deleted file mode 100644 index 32cf5dda7..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_group/meta/main.yml +++ /dev/null @@ -1 +0,0 @@ -dependencies: [] diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_group/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_group/tasks/main.yml deleted file mode 100644 index 65b441827..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_group/tasks/main.yml +++ /dev/null @@ -1,127 +0,0 @@ --- -- name: set up aws connection info - module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" - region: "{{ aws_region }}" - collections: - - amazon.aws - block: - - name: ensure ansible user exists - iam_user: - name: '{{ test_user }}' - state: present - - - name: ensure group exists - iam_group: - name: '{{ test_group }}' - users: - - '{{ test_user }}' - state: present - register: iam_group - - - assert: - that: - - iam_group.iam_group.users - - iam_group is changed - - - name: add non-existent user to group - iam_group: - name: '{{ test_group }}' - users: - - '{{ test_user }}' - - NonExistentUser - state: present - ignore_errors: yes - register: iam_group - - - name: assert that adding non-existent user to group fails with helpful message - assert: - that: - - iam_group is failed - - iam_group.msg.startswith("Couldn't add user NonExistentUser to group {{ test_group }}") - - - name: remove a user - iam_group: - name: '{{ test_group }}' - purge_users: True - users: [] - state: present - register: iam_group - - - assert: - that: - - iam_group is changed - - not iam_group.iam_group.users - - - name: re-remove a user (no change) - iam_group: - name: '{{ test_group }}' - purge_users: True - users: [] - state: present - register: iam_group - - - assert: - that: - - iam_group is not changed - - not iam_group.iam_group.users - - - name: Add the user again - iam_group: - name: '{{ test_group }}' - users: - - '{{ test_user }}' - state: present - register: iam_group - - - assert: - that: - - iam_group is changed - - iam_group.iam_group.users - - - name: Re-add the user - iam_group: - name: '{{ test_group }}' - users: - - '{{ test_user }}' - state: present - register: iam_group - - - assert: - that: - - iam_group is not changed - - iam_group.iam_group.users - - - name: remove group - iam_group: - name: '{{ test_group }}' - state: absent - register: iam_group - - - assert: - that: - - iam_group is changed - - - name: re-remove group - iam_group: - name: '{{ test_group }}' - state: absent - register: iam_group - - - assert: - that: - - iam_group is not changed - - always: - - name: remove group - iam_group: - name: '{{ test_group }}' - state: absent - - - name: remove ansible user - iam_user: - name: '{{ test_user }}' - state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml deleted file mode 100644 index a6edcacef..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ --- -policy_name: "{{ resource_prefix }}-policy" diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_managed_policy/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_managed_policy/meta/main.yml deleted file mode 100644 index 32cf5dda7..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_managed_policy/meta/main.yml +++ /dev/null @@ -1 +0,0 @@ -dependencies: [] diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml deleted file mode 100644 index f17b7cad0..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml +++ /dev/null @@ -1,160 +0,0 @@ --- -- name: "Run integration tests for IAM managed policy" - module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" - region: "{{ aws_region }}" - collections: - - amazon.aws - block: - ## Test policy creation - - name: Create IAM managed policy - check mode - iam_managed_policy: - policy_name: "{{ policy_name }}" - policy: - Version: "2012-10-17" - Statement: - - Effect: "Deny" - Action: "logs:CreateLogGroup" - Resource: "*" - state: present - register: result - check_mode: yes - - - name: Create IAM managed policy - check mode - assert: - that: - - result.changed - - - name: Create IAM
managed policy - iam_managed_policy: - policy_name: "{{ policy_name }}" - policy: - Version: "2012-10-17" - Statement: - - Effect: "Deny" - Action: "logs:CreateLogGroup" - Resource: "*" - state: present - register: result - - - name: Create IAM managed policy - assert: - that: - - result.changed - - result.policy.policy_name == policy_name - - - name: Create IAM managed policy - idempotency check - iam_managed_policy: - policy_name: "{{ policy_name }}" - policy: - Version: "2012-10-17" - Statement: - - Effect: "Deny" - Action: "logs:CreateLogGroup" - Resource: "*" - state: present - register: result - - - name: Create IAM managed policy - idempotency check - assert: - that: - - not result.changed - - ## Test policy update - - name: Update IAM managed policy - check mode - iam_managed_policy: - policy_name: "{{ policy_name }}" - policy: - Version: "2012-10-17" - Statement: - - Effect: "Deny" - Action: "logs:Describe*" - Resource: "*" - state: present - register: result - check_mode: yes - - - name: Update IAM managed policy - check mode - assert: - that: - - result.changed - - - name: Update IAM managed policy - iam_managed_policy: - policy_name: "{{ policy_name }}" - policy: - Version: "2012-10-17" - Statement: - - Effect: "Deny" - Action: "logs:Describe*" - Resource: "*" - state: present - register: result - - - name: Update IAM managed policy - assert: - that: - - result.changed - - result.policy.policy_name == policy_name - - - name: Update IAM managed policy - idempotency check - iam_managed_policy: - policy_name: "{{ policy_name }}" - policy: - Version: "2012-10-17" - Statement: - - Effect: "Deny" - Action: "logs:Describe*" - Resource: "*" - state: present - register: result - - - name: Update IAM managed policy - idempotency check - assert: - that: - - not result.changed - - ## Test policy deletion - - name: Delete IAM managed policy - check mode - iam_managed_policy: - policy_name: "{{ policy_name }}" - state: absent - register: result - check_mode: yes - - - name: Delete IAM managed policy - check mode - assert: - that: - - result.changed - - - name: Delete IAM managed policy - iam_managed_policy: - policy_name: "{{ policy_name }}" - state: absent - register: result - - - name: Delete IAM managed policy - assert: - that: - - result.changed - - - name: Delete IAM managed policy - idempotency check - iam_managed_policy: - policy_name: "{{ policy_name }}" - state: absent - register: result - - - name: Delete IAM managed policy - idempotency check - assert: - that: - - not result.changed - - always: - - name: Delete IAM managed policy - iam_managed_policy: - policy_name: "{{ policy_name }}" - state: absent - ignore_errors: yes diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/meta/main.yml deleted file mode 100644 index 32cf5dda7..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/meta/main.yml +++ /dev/null @@ -1 +0,0 @@ -dependencies: [] diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml b/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml deleted file mode 100644 index 7b773eac8..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml +++ /dev/null @@ -1,107 +0,0 @@ -- module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key }}" - 
aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" - region: "{{ aws_region }}" - collections: - - amazon.aws - block: - - name: set iam password policy - iam_password_policy: - state: present - min_pw_length: 8 - require_symbols: false - require_numbers: true - require_uppercase: true - require_lowercase: true - allow_pw_change: true - pw_max_age: 60 - pw_reuse_prevent: 5 - pw_expire: false - register: result - - - name: assert that changes were made - assert: - that: - - result.changed - - - name: verify iam password policy has been created - iam_password_policy: - state: present - min_pw_length: 8 - require_symbols: false - require_numbers: true - require_uppercase: true - require_lowercase: true - allow_pw_change: true - pw_max_age: 60 - pw_reuse_prevent: 5 - pw_expire: false - register: result - - - name: assert that no changes were made - assert: - that: - - not result.changed - - - name: update iam password policy with different settings - iam_password_policy: - state: present - min_pw_length: 15 - require_symbols: true - require_numbers: true - require_uppercase: true - require_lowercase: true - allow_pw_change: true - pw_max_age: 30 - pw_reuse_prevent: 10 - pw_expire: true - register: result - - - name: assert that updates were made - assert: - that: - - result.changed - - # Test for regression of #59102 - - name: update iam password policy without expiry - iam_password_policy: - state: present - min_pw_length: 15 - require_symbols: true - require_numbers: true - require_uppercase: true - require_lowercase: true - allow_pw_change: true - register: result - - - name: assert that changes were made - assert: - that: - - result.changed - - - name: remove iam password policy - iam_password_policy: - state: absent - register: result - - - name: assert password policy has been removed - assert: - that: - - result.changed - - - name: verify password policy has been removed - iam_password_policy: - state: absent - register: result - - - name: assert no changes were made - assert: - that: - - not result.changed - always: - - name: remove iam password policy - iam_password_policy: - state: absent - register: result diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/defaults/main.yml deleted file mode 100644 index d496c4216..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/defaults/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -test_role: '{{ resource_prefix }}-role' -test_path: '/{{ resource_prefix }}/' -safe_managed_policy: 'AWSDenyAll' -custom_policy_name: '{{ resource_prefix }}-denyall' -boundary_policy: 'arn:aws:iam::aws:policy/AWSDenyAll' diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/meta/main.yml deleted file mode 100644 index 32cf5dda7..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/meta/main.yml +++ /dev/null @@ -1 +0,0 @@ -dependencies: [] diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml deleted file mode 100644 index 89a983f15..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml +++ /dev/null @@ -1,94 +0,0 @@ ---- -- 
name: "Create minimal role with no boundary policy" - iam_role: - name: "{{ test_role }}" - create_instance_profile: no - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - -- name: "Configure Boundary Policy (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - create_instance_profile: no - boundary: "{{ boundary_policy }}" - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "Configure Boundary Policy" - iam_role: - name: "{{ test_role }}" - create_instance_profile: no - boundary: "{{ boundary_policy }}" - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - -- name: "Configure Boundary Policy (no change) - check mode" - iam_role: - name: "{{ test_role }}" - create_instance_profile: no - boundary: "{{ boundary_policy }}" - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Configure Boundary Policy (no change)" - iam_role: - name: "{{ test_role }}" - create_instance_profile: no - boundary: "{{ boundary_policy }}" - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - -- name: "iam_role_info after adding boundary policy" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - '"description" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 0 - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 3600 - - role_info.iam_roles[0].path == '/' - - role_info.iam_roles[0].permissions_boundary.permissions_boundary_arn == boundary_policy - - role_info.iam_roles[0].permissions_boundary.permissions_boundary_type == 'Policy' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - -- name: "Remove IAM Role" - iam_role: - state: absent - name: "{{ test_role }}" - delete_instance_profile: yes - register: iam_role - -- assert: - that: - - iam_role is changed
\ No newline at end of file diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml deleted file mode 100644 index c23234ebf..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml +++ /dev/null @@ -1,131 +0,0 @@ ---- -- name: "Complex IAM Role (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' - boundary: "{{ boundary_policy }}" - create_instance_profile: no - description: "Ansible Test Role {{ resource_prefix }}" - managed_policy: - - "{{ safe_managed_policy }}" - - "{{ custom_policy_name }}" - max_session_duration: 43200 - path: "{{ test_path }}" - tags: - TagA: "ValueA" - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "iam_role_info after Complex Role creation in check_mode" - iam_role_info: - name: "{{ test_role }}" - register: role_info -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 0 - -- name: "Complex IAM Role" - iam_role: - name: "{{ test_role }}" - assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' - boundary: "{{ boundary_policy }}" - create_instance_profile: no - description: "Ansible Test Role {{ resource_prefix }}" - managed_policy: - - "{{ safe_managed_policy }}" - - "{{ custom_policy_name }}" - max_session_duration: 43200 - path: "{{ test_path }}" - tags: - TagA: "ValueA" - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - - 'iam_role.iam_role.arn.startswith("arn")' - - 'iam_role.iam_role.arn.endswith("role" + test_path + test_role )' - # Would be nice to test the contents... 
- - '"assume_role_policy_document" in iam_role.iam_role' - - iam_role.iam_role.attached_policies | length == 2 - - iam_role.iam_role.max_session_duration == 43200 - - iam_role.iam_role.path == test_path - - iam_role.iam_role.role_name == test_role - - '"create_date" in iam_role.iam_role' - - '"role_id" in iam_role.iam_role' - -- name: "Complex IAM role (no change) - check mode" - iam_role: - name: "{{ test_role }}" - assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' - boundary: "{{ boundary_policy }}" - create_instance_profile: no - description: "Ansible Test Role {{ resource_prefix }}" - managed_policy: - - "{{ safe_managed_policy }}" - - "{{ custom_policy_name }}" - max_session_duration: 43200 - path: "{{ test_path }}" - tags: - TagA: "ValueA" - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Complex IAM role (no change)" - iam_role: - name: "{{ test_role }}" - assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' - boundary: "{{ boundary_policy }}" - create_instance_profile: no - description: "Ansible Test Role {{ resource_prefix }}" - managed_policy: - - "{{ safe_managed_policy }}" - - "{{ custom_policy_name }}" - max_session_duration: 43200 - path: "{{ test_path }}" - tags: - TagA: "ValueA" - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - -- name: "iam_role_info after Role creation" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - 'role_info.iam_roles[0].description == "Ansible Test Role {{ resource_prefix }}"' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 0 - - role_info.iam_roles[0].managed_policies | length == 2 - - safe_managed_policy in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) - - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == test_path - - role_info.iam_roles[0].permissions_boundary.permissions_boundary_arn == boundary_policy - - role_info.iam_roles[0].permissions_boundary.permissions_boundary_type == 'Policy' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - '"TagA" in role_info.iam_roles[0].tags' - - role_info.iam_roles[0].tags.TagA == "ValueA" diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml deleted file mode 100644 index 0579a6d34..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml +++ /dev/null @@ -1,404 +0,0 @@ ---- -- name: Try running some rapid fire create/delete tests - block: - - name: "Minimal IAM Role without instance profile (rapid)" - iam_role: - name: "{{ test_role }}" - create_instance_profile: no - register: iam_role - - - name: "Minimal IAM Role without instance profile (rapid)" 
- iam_role: - name: "{{ test_role }}" - create_instance_profile: no - register: iam_role_again - - - assert: - that: - - iam_role is changed - - iam_role_again is not changed - - - name: "Remove IAM Role (rapid)" - iam_role: - state: absent - name: "{{ test_role }}" - register: iam_role - - - name: "Remove IAM Role (rapid)" - iam_role: - state: absent - name: "{{ test_role }}" - register: iam_role_again - - - assert: - that: - - iam_role is changed - - iam_role_again is not changed - - - name: "Minimal IAM Role without instance profile (rapid)" - iam_role: - name: "{{ test_role }}" - create_instance_profile: no - register: iam_role - - - name: "Remove IAM Role (rapid)" - iam_role: - state: absent - name: "{{ test_role }}" - - register: iam_role_again - - assert: - that: - - iam_role is changed - - iam_role_again is changed - -# =================================================================== -# Role Creation -# (without Instance profile) -- name: "iam_role_info before Role creation (no args)" - iam_role_info: - register: role_info - -- assert: - that: - - role_info is succeeded - -- name: "iam_role_info before Role creation (search for test role)" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 0 - -- name: "Minimal IAM Role (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - create_instance_profile: no - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is changed - -- name: "iam_role_info after Role creation in check_mode" - iam_role_info: - name: "{{ test_role }}" - register: role_info -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 0 - -- name: "Minimal IAM Role without instance profile" - iam_role: - name: "{{ test_role }}" - create_instance_profile: no - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - - 'iam_role.iam_role.arn.startswith("arn")' - - 'iam_role.iam_role.arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in iam_role.iam_role' - - '"assume_role_policy_document_raw" in iam_role.iam_role' - - iam_role.iam_role.assume_role_policy_document_raw == assume_deny_policy - - iam_role.iam_role.attached_policies | length == 0 - - iam_role.iam_role.max_session_duration == 3600 - - iam_role.iam_role.path == '/' - - iam_role.iam_role.role_name == test_role - - '"create_date" in iam_role.iam_role' - - '"role_id" in iam_role.iam_role' - -- name: "Minimal IAM Role without instance profile (no change) - check mode" - iam_role: - name: "{{ test_role }}" - create_instance_profile: no - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Minimal IAM Role without instance profile (no change)" - iam_role: - name: "{{ test_role }}" - create_instance_profile: no - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - -- name: "iam_role_info after Role creation" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"assume_role_policy_document_raw" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - '"description" not in role_info.iam_roles[0]' - - 
role_info.iam_roles[0].assume_role_policy_document_raw == assume_deny_policy - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 0 - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 3600 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 0 - -- name: "Remove IAM Role" - iam_role: - state: absent - name: "{{ test_role }}" - delete_instance_profile: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "iam_role_info after Role deletion" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 0 - -# ------------------------------------------------------------------------------------------ - -# (with path) -- name: "Minimal IAM Role with path (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - path: "{{ test_path }}" - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is changed - -- name: "Minimal IAM Role with path" - iam_role: - name: "{{ test_role }}" - path: "{{ test_path }}" - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - - 'iam_role.iam_role.arn.startswith("arn")' - - 'iam_role.iam_role.arn.endswith("role" + test_path + test_role )' - # Would be nice to test the contents... - - '"assume_role_policy_document" in iam_role.iam_role' - - iam_role.iam_role.attached_policies | length == 0 - - iam_role.iam_role.max_session_duration == 3600 - - iam_role.iam_role.path == '{{ test_path }}' - - iam_role.iam_role.role_name == test_role - - '"create_date" in iam_role.iam_role' - - '"role_id" in iam_role.iam_role' - -- name: "Minimal IAM Role with path (no change) - check mode" - iam_role: - name: "{{ test_role }}" - path: "{{ test_path }}" - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Minimal IAM Role with path (no change)" - iam_role: - name: "{{ test_role }}" - path: "{{ test_path }}" - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - -- name: "iam_role_info after Role creation" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - '"description" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile" + test_path + test_role)' - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 3600 - - role_info.iam_roles[0].path == '{{ test_path }}' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == 
iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 0 - -- name: "iam_role_info after Role creation (searching a path)" - iam_role_info: - path_prefix: "{{ test_path }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - '"description" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile" + test_path + test_role)' - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 3600 - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].path == '{{ test_path }}' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 0 - -- name: "Remove IAM Role" - iam_role: - state: absent - name: "{{ test_role }}" - path: "{{ test_path }}" - # If we don't delete the existing profile it'll be reused (with the path) - # by the test below. - delete_instance_profile: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "iam_role_info after Role deletion" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 0 - -# ------------------------------------------------------------------------------------------ - -# (with Instance profile) -- name: "Minimal IAM Role with instance profile - check mode" - iam_role: - name: "{{ test_role }}" - create_instance_profile: yes - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is changed - -- name: "Minimal IAM Role with instance profile" - iam_role: - name: "{{ test_role }}" - create_instance_profile: yes - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - - 'iam_role.iam_role.arn.startswith("arn")' - - 'iam_role.iam_role.arn.endswith("role/" + test_role )' - # Would be nice to test the contents... 
- - '"assume_role_policy_document" in iam_role.iam_role' - - iam_role.iam_role.attached_policies | length == 0 - - iam_role.iam_role.max_session_duration == 3600 - - iam_role.iam_role.path == '/' - - iam_role.iam_role.role_name == test_role - - '"create_date" in iam_role.iam_role' - - '"role_id" in iam_role.iam_role' - -- name: "Minimal IAM Role wth instance profile (no change) - check mode" - iam_role: - name: "{{ test_role }}" - create_instance_profile: yes - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Minimal IAM Role wth instance profile (no change)" - iam_role: - name: "{{ test_role }}" - create_instance_profile: yes - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - -- name: "iam_role_info after Role creation" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - '"description" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 3600 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 0 diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/description_update.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/description_update.yml deleted file mode 100644 index 85f5e1f56..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/description_update.yml +++ /dev/null @@ -1,148 +0,0 @@ ---- -- name: "Add Description (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - description: "Ansible Test Role {{ resource_prefix }}" - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "Add Description" - iam_role: - name: "{{ test_role }}" - description: "Ansible Test Role {{ resource_prefix }}" - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - - iam_role.iam_role.description == 'Ansible Test Role {{ resource_prefix }}' - -- name: "Add Description (no change) - check mode" - iam_role: - name: "{{ test_role }}" - description: "Ansible Test Role {{ resource_prefix }}" - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Add Description (no change)" - iam_role: - name: "{{ test_role }}" - description: "Ansible Test Role {{ resource_prefix }}" - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - - iam_role.iam_role.description == 'Ansible Test Role {{ resource_prefix }}' - -- name: "iam_role_info after adding 
Description" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - 'role_info.iam_roles[0].description == "Ansible Test Role {{ resource_prefix }}"' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 0 - -# ------------------------------------------------------------------------------------------ - -- name: "Update Description (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - description: "Ansible Test Role (updated) {{ resource_prefix }}" - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "Update Description" - iam_role: - name: "{{ test_role }}" - description: "Ansible Test Role (updated) {{ resource_prefix }}" - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - - iam_role.iam_role.description == 'Ansible Test Role (updated) {{ resource_prefix }}' - -- name: "Update Description (no change) - check mode" - iam_role: - name: "{{ test_role }}" - description: "Ansible Test Role (updated) {{ resource_prefix }}" - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Update Description (no change)" - iam_role: - name: "{{ test_role }}" - description: "Ansible Test Role (updated) {{ resource_prefix }}" - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - - iam_role.iam_role.description == 'Ansible Test Role (updated) {{ resource_prefix }}' - -- name: "iam_role_info after updating Description" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == '/' - - 
'"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 0 diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml deleted file mode 100644 index d364d87d7..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -- name: "Attach inline policy a" - iam_policy: - state: present - iam_type: "role" - iam_name: "{{ test_role }}" - policy_name: "inline-policy-a" - policy_json: '{{ lookup("file", "deny-all-a.json") }}' - -- name: "Attach inline policy b" - iam_policy: - state: present - iam_type: "role" - iam_name: "{{ test_role }}" - policy_name: "inline-policy-b" - policy_json: '{{ lookup("file", "deny-all-b.json") }}' - -- name: "iam_role_info after attaching inline policies (using iam_policy)" - iam_role_info: - name: "{{ test_role }}" - register: role_info -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' - - role_info.iam_roles[0].inline_policies | length == 2 - - '"inline-policy-a" in role_info.iam_roles[0].inline_policies' - - '"inline-policy-b" in role_info.iam_roles[0].inline_policies' - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 1 - - safe_managed_policy not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) - - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 1 - - '"TagB" in role_info.iam_roles[0].tags' - - role_info.iam_roles[0].tags.TagB == "ValueB" diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/main.yml deleted file mode 100644 index ae47ada1a..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/main.yml +++ /dev/null @@ -1,119 +0,0 @@ ---- -# Tests for iam_role and iam_role_info -# -# Tests: -# - Minimal Role creation -# - Role deletion -# - Fetching a specific role -# - Creating roles w/ and w/o instance profiles -# - Creating roles w/ a path -# - Updating Max Session Duration -# - Updating Description -# - Managing list of managed policies -# - Managing list of inline policies (for testing _info) -# - 
Managing boundary policy # - # Notes: # - Only tests *documented* return values ( RESULT.iam_role ) # - There are some known timing issues with boto3 returning before actions # complete; in the case of problems with "changed" status, it's worth enabling # the standard_pauses and paranoid_pauses options as a first step in debugging - - - name: "Setup AWS connection info" - module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" - region: "{{ aws_region }}" - iam_role: - assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' - collections: - - amazon.aws - - community.general - block: - - set_fact: - assume_deny_policy: '{{ lookup("file", "deny-assume.json") | from_json }}' - # =================================================================== - # Parameter Checks - - include_tasks: parameter_checks.yml - - # =================================================================== - # Supplemental resource pre-creation - - name: "Create Safe IAM Managed Policy" - iam_managed_policy: - state: present - policy_name: "{{ custom_policy_name }}" - policy_description: "A safe (deny-all) managed policy" - policy: "{{ lookup('file', 'deny-all.json') }}" - register: create_managed_policy - - - assert: - that: - - create_managed_policy is succeeded - - # =================================================================== - # Rapid Role Creation and deletion - - include_tasks: creation_deletion.yml - - # =================================================================== - # Max Session Duration Manipulation - - include_tasks: max_session_update.yml - - # =================================================================== - # Description Manipulation - - include_tasks: description_update.yml - - # =================================================================== - # Tag Manipulation - - include_tasks: tags_update.yml - - # =================================================================== - # Policy Manipulation - - include_tasks: policy_update.yml - - # =================================================================== - # Inline Policy (test _info behavior) - - include_tasks: inline_policy_update.yml - - # =================================================================== - # Role Removal - - include_tasks: role_removal.yml - - # =================================================================== - # Boundary Policy (requires create_instance_profile: no) - - include_tasks: boundary_policy.yml - - # =================================================================== - # Complex Role Creation - - include_tasks: complex_role_creation.yml - - always: - # =================================================================== - # Cleanup - - - name: "Remove IAM Role" - iam_role: - state: absent - name: "{{ test_role }}" - delete_instance_profile: yes - ignore_errors: true - - - name: "Remove IAM Role (with path)" - iam_role: - state: absent - name: "{{ test_role }}" - path: "{{ test_path }}" - delete_instance_profile: yes - ignore_errors: true - - - name: "iam_role_info after Role deletion" - iam_role_info: - name: "{{ test_role }}" - ignore_errors: true - - - name: "Remove test managed policy" - iam_managed_policy: - state: absent - policy_name: "{{ custom_policy_name }}"
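# A minimal sketch, not from the upstream test suite, of the module_defaults
# layering used by main.yml above: group/aws injects the test credentials into
# every AWS module, while the iam_role entry supplies a default
# assume_role_policy_document so the included task files can omit it
# (parameter_checks.yml empties that default again to provoke the
# missing-parameter error). Values here are placeholders:
- hosts: localhost
  module_defaults:
    group/aws:
      region: us-east-1
    iam_role:
      assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
  tasks:
    - name: Role inherits the default trust policy from module_defaults
      iam_role:
        name: example-role
        create_instance_profile: no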
diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml deleted file mode 100644 index 8ad3641be..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml +++ /dev/null @@ -1,71 +0,0 @@ --- -- name: "Update Max Session Duration (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - max_session_duration: 43200 - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "Update Max Session Duration" - iam_role: - name: "{{ test_role }}" - max_session_duration: 43200 - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - - iam_role.iam_role.max_session_duration == 43200 - -- name: "Update Max Session Duration (no change)" - iam_role: - name: "{{ test_role }}" - max_session_duration: 43200 - register: iam_role - -- assert: - that: - - iam_role is not changed - -- name: "Update Max Session Duration (no change) - check mode" - iam_role: - name: "{{ test_role }}" - max_session_duration: 43200 - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "iam_role_info after updating Max Session Duration" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - '"description" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 0 diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml deleted file mode 100644 index 57df5436a..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml +++ /dev/null @@ -1,90 +0,0 @@ --- -# Parameter Checks -- name: "Friendly message when creating an instance profile and adding a boundary policy" - iam_role: - name: "{{ test_role }}" - boundary: "{{ boundary_policy }}" - register: iam_role - ignore_errors: yes - -- assert: - that: - - iam_role is failed - - '"boundary policy" in iam_role.msg' - - '"create_instance_profile" in iam_role.msg' - - '"false" in iam_role.msg' - -- name: "Friendly message when boundary policy is not an ARN" - iam_role: - name: "{{ test_role }}" - boundary: "AWSDenyAll" - create_instance_profile: no - register: iam_role - ignore_errors: yes - -- assert: - that: - - iam_role is failed - - '"Boundary policy" in iam_role.msg' - - '"ARN" in iam_role.msg' - -- name: 'Friendly message when "present"
without assume_role_policy_document' - module_defaults: { iam_role: {} } - iam_role: - name: "{{ test_role }}" - register: iam_role - ignore_errors: yes - -- assert: - that: - - iam_role is failed - - 'iam_role.msg.startswith("state is present but all of the following are missing")' - - '"assume_role_policy_document" in iam_role.msg' - -- name: "Maximum Session Duration needs to be between 1 and 12 hours" - iam_role: - name: "{{ test_role }}" - max_session_duration: 3599 - register: iam_role - ignore_errors: yes - -- assert: - that: - - iam_role is failed - - '"max_session_duration must be between" in iam_role.msg' - -- name: "Maximum Session Duration needs to be between 1 and 12 hours" - iam_role: - name: "{{ test_role }}" - max_session_duration: 43201 - register: iam_role - ignore_errors: yes - -- assert: - that: - - iam_role is failed - - '"max_session_duration must be between" in iam_role.msg' - -- name: "Role Paths must start with /" - iam_role: - name: "{{ test_role }}" - path: "test/" - register: iam_role - ignore_errors: yes - -- assert: - that: - - iam_role is failed - - '"path must begin and end with /" in iam_role.msg' - -- name: "Role Paths must end with /" - iam_role: - name: "{{ test_role }}" - path: "/test" - register: iam_role - ignore_errors: yes - -- assert: - that: - - iam_role is failed - - '"path must begin and end with /" in iam_role.msg' diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/policy_update.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/policy_update.yml deleted file mode 100644 index a822edf74..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/policy_update.yml +++ /dev/null @@ -1,250 +0,0 @@ ---- -- name: "Add Managed Policy (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - purge_policies: no - managed_policy: - - "{{ safe_managed_policy }}" - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "Add Managed Policy" - iam_role: - name: "{{ test_role }}" - purge_policies: no - managed_policy: - - "{{ safe_managed_policy }}" - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - -- name: "Add Managed Policy (no change) - check mode" - iam_role: - name: "{{ test_role }}" - purge_policies: no - managed_policy: - - "{{ safe_managed_policy }}" - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Add Managed Policy (no change)" - iam_role: - name: "{{ test_role }}" - purge_policies: no - managed_policy: - - "{{ safe_managed_policy }}" - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - -- name: "iam_role_info after adding Managed Policy" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 
'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 1 - - safe_managed_policy in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) - - custom_policy_name not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 1 - - '"TagB" in role_info.iam_roles[0].tags' - - role_info.iam_roles[0].tags.TagB == "ValueB" - -# ------------------------------------------------------------------------------------------ - -- name: "Update Managed Policy without purge (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - purge_policies: no - managed_policy: - - "{{ custom_policy_name }}" - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "Update Managed Policy without purge" - iam_role: - name: "{{ test_role }}" - purge_policies: no - managed_policy: - - "{{ custom_policy_name }}" - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - -- name: "Update Managed Policy without purge (no change) - check mode" - iam_role: - name: "{{ test_role }}" - purge_policies: no - managed_policy: - - "{{ custom_policy_name }}" - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Update Managed Policy without purge (no change)" - iam_role: - name: "{{ test_role }}" - purge_policies: no - managed_policy: - - "{{ custom_policy_name }}" - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - -- name: "iam_role_info after updating Managed Policy without purge" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 2 - - safe_managed_policy in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) - - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - 
role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 1 - - '"TagB" in role_info.iam_roles[0].tags' - - role_info.iam_roles[0].tags.TagB == "ValueB" - -# ------------------------------------------------------------------------------------------ - -# Managed Policies are purged by default -- name: "Update Managed Policy with purge (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - managed_policy: - - "{{ custom_policy_name }}" - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "Update Managed Policy with purge" - iam_role: - name: "{{ test_role }}" - managed_policy: - - "{{ custom_policy_name }}" - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - -- name: "Update Managed Policy with purge (no change) - check mode" - iam_role: - name: "{{ test_role }}" - managed_policy: - - "{{ custom_policy_name }}" - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Update Managed Policy with purge (no change)" - iam_role: - name: "{{ test_role }}" - managed_policy: - - "{{ custom_policy_name }}" - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - -- name: "iam_role_info after updating Managed Policy with purge" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 1 - - safe_managed_policy not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) - - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 1 - - '"TagB" in role_info.iam_roles[0].tags' - - role_info.iam_roles[0].tags.TagB == "ValueB" diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/role_removal.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/role_removal.yml deleted file mode 100644 index ebcfd5453..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/role_removal.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -- name: "Remove IAM Role (CHECK MODE)" - iam_role: - state: absent - name: "{{ test_role }}" - delete_instance_profile: yes - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- 
name: "iam_role_info after deleting role in check mode" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - -- name: "Remove IAM Role" - iam_role: - state: absent - name: "{{ test_role }}" - delete_instance_profile: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "iam_role_info after deleting role" - iam_role_info: - name: "{{ test_role }}" - register: role_info -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 0 - -- name: "Remove IAM Role (should be gone already) - check mode" - iam_role: - state: absent - name: "{{ test_role }}" - delete_instance_profile: yes - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Remove IAM Role (should be gone already)" - iam_role: - state: absent - name: "{{ test_role }}" - delete_instance_profile: yes - register: iam_role - -- assert: - that: - - iam_role is not changed diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/tags_update.yml b/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/tags_update.yml deleted file mode 100644 index 5eadd9fdf..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/tags_update.yml +++ /dev/null @@ -1,341 +0,0 @@ ---- -- name: "Add Tag (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - tags: - TagA: ValueA - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "Add Tag" - iam_role: - name: "{{ test_role }}" - tags: - TagA: ValueA - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - - iam_role.iam_role.tags | length == 1 - - '"TagA" in iam_role.iam_role.tags' - - iam_role.iam_role.tags.TagA == "ValueA" - -- name: "Add Tag (no change) - check mode" - iam_role: - name: "{{ test_role }}" - tags: - TagA: ValueA - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Add Tag (no change)" - iam_role: - name: "{{ test_role }}" - tags: - TagA: ValueA - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - - '"TagA" in iam_role.iam_role.tags' - - iam_role.iam_role.tags.TagA == "ValueA" - -- name: "iam_role_info after adding Tags" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id 
- - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 1 - - '"TagA" in role_info.iam_roles[0].tags' - - role_info.iam_roles[0].tags.TagA == "ValueA" - -# ------------------------------------------------------------------------------------------ - -- name: "Update Tag (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - tags: - TagA: AValue - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "Update Tag" - iam_role: - name: "{{ test_role }}" - tags: - TagA: AValue - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - - '"TagA" in iam_role.iam_role.tags' - - iam_role.iam_role.tags.TagA == "AValue" - -- name: "Update Tag (no change) - check mode" - iam_role: - name: "{{ test_role }}" - tags: - TagA: AValue - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Update Tag (no change)" - iam_role: - name: "{{ test_role }}" - tags: - TagA: AValue - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - - '"TagA" in iam_role.iam_role.tags' - - iam_role.iam_role.tags.TagA == "AValue" - -- name: "iam_role_info after updating Tag" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 1 - - '"TagA" in role_info.iam_roles[0].tags' - - role_info.iam_roles[0].tags.TagA == "AValue" - -# ------------------------------------------------------------------------------------------ - -- name: "Add second Tag without purge (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - purge_tags: no - tags: - TagB: ValueB - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "Add second Tag without purge" - iam_role: - name: "{{ test_role }}" - purge_tags: no - tags: - TagB: ValueB - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - - '"TagB" in iam_role.iam_role.tags' - - iam_role.iam_role.tags.TagB == "ValueB" - -- name: "Add second Tag without purge (no change) - check mode" - iam_role: - name: "{{ test_role }}" - purge_tags: no - tags: - TagB: ValueB - register: iam_role - check_mode: yes - -- assert: - that: - - iam_role is not changed - -- name: "Add second Tag without purge (no change)" - iam_role: - name: "{{ test_role }}" - purge_tags: 
no - tags: - TagB: ValueB - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - - '"TagB" in iam_role.iam_role.tags' - - iam_role.iam_role.tags.TagB == "ValueB" - -- name: "iam_role_info after adding second Tag without purge" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 2 - - '"TagA" in role_info.iam_roles[0].tags' - - role_info.iam_roles[0].tags.TagA == "AValue" - - '"TagB" in role_info.iam_roles[0].tags' - - role_info.iam_roles[0].tags.TagB == "ValueB" - -# ------------------------------------------------------------------------------------------ - -- name: "Purge first tag (CHECK MODE)" - iam_role: - name: "{{ test_role }}" - purge_tags: yes - tags: - TagB: ValueB - check_mode: yes - register: iam_role - -- assert: - that: - - iam_role is changed - -- name: "Purge first tag" - iam_role: - name: "{{ test_role }}" - purge_tags: yes - tags: - TagB: ValueB - register: iam_role - -- assert: - that: - - iam_role is changed - - iam_role.iam_role.role_name == test_role - - '"TagB" in iam_role.iam_role.tags' - - iam_role.iam_role.tags.TagB == "ValueB" - -- name: "Purge first tag (no change) - check mode" - iam_role: - name: "{{ test_role }}" - purge_tags: yes - tags: - TagB: ValueB - register: iam_role - -- assert: - that: - - iam_role is not changed - -- name: "Purge first tag (no change)" - iam_role: - name: "{{ test_role }}" - purge_tags: yes - tags: - TagB: ValueB - register: iam_role - -- assert: - that: - - iam_role is not changed - - iam_role.iam_role.role_name == test_role - - '"TagB" in iam_role.iam_role.tags' - - iam_role.iam_role.tags.TagB == "ValueB" - -- name: "iam_role_info after purging first Tag" - iam_role_info: - name: "{{ test_role }}" - register: role_info - -- assert: - that: - - role_info is succeeded - - role_info.iam_roles | length == 1 - - 'role_info.iam_roles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' - - '"assume_role_policy_document" in role_info.iam_roles[0]' - - '"create_date" in role_info.iam_roles[0]' - - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' - - role_info.iam_roles[0].inline_policies | length == 0 - - role_info.iam_roles[0].instance_profiles | length == 1 - - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role - - 
'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' - - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' - - role_info.iam_roles[0].managed_policies | length == 0 - - role_info.iam_roles[0].max_session_duration == 43200 - - role_info.iam_roles[0].path == '/' - - '"permissions_boundary" not in role_info.iam_roles[0]' - - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id - - role_info.iam_roles[0].role_name == test_role - - role_info.iam_roles[0].tags | length == 1 - - '"TagA" not in role_info.iam_roles[0].tags' - - '"TagB" in role_info.iam_roles[0].tags' - - role_info.iam_roles[0].tags.TagB == "ValueB" diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/tasks/main.yml index b061fc601..3098d4811 100644 --- a/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/tasks/main.yml @@ -1,9 +1,9 @@ - module_defaults: group/aws: region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" collections: - amazon.aws block: diff --git a/ansible_collections/community/aws/tests/integration/targets/iam_server_certificate/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/iam_server_certificate/tasks/main.yml index 0cfab38c8..d50ebfe52 100644 --- a/ansible_collections/community/aws/tests/integration/targets/iam_server_certificate/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/iam_server_certificate/tasks/main.yml @@ -11,9 +11,9 @@ # - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: ################################################ diff --git a/ansible_collections/community/aws/tests/integration/targets/inspector_target/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/inspector_target/tasks/main.yml index 907e1ffdd..a32e3bd68 100644 --- a/ansible_collections/community/aws/tests/integration/targets/inspector_target/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/inspector_target/tasks/main.yml @@ -4,14 +4,14 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: - name: Create AWS Inspector Target Group - aws_inspector_target: + inspector_target: name: "{{ aws_inspector_scan_name }}" state: present tags: @@ -20,7 +20,7 @@ register: target_group_create - name: Create AWS Inspector Target Group (Verify) - aws_inspector_target: + inspector_target: name: "{{ aws_inspector_scan_name }}" state: present tags: @@ -41,7 +41,7 @@ - target_group_create_verify.tags.changed == "no" 
- name: Change AWS Inspector Target Group Tags - aws_inspector_target: + inspector_target: name: "{{ aws_inspector_scan_name }}" state: present tags: @@ -50,7 +50,7 @@ register: target_group_tag_change - name: Change AWS Inspector Target Group Tags (Verify) - aws_inspector_target: + inspector_target: name: "{{ aws_inspector_scan_name }}" state: present tags: @@ -72,13 +72,13 @@ always: - name: Delete AWS Inspector Target Group - aws_inspector_target: + inspector_target: name: "{{ aws_inspector_scan_name }}" state: absent register: target_group_delete - name: Delete AWS Inspector Target Group (Verify) - aws_inspector_target: + inspector_target: name: "{{ aws_inspector_scan_name }}" state: absent register: target_group_delete_verify diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/aliases b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/aliases new file mode 100644 index 000000000..d528335bb --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/aliases @@ -0,0 +1,2 @@ +time=20m +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/meta/main.yml index 32cf5dda7..32cf5dda7 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/meta/main.yml diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/create_inventory_config.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/create_inventory_config.yml new file mode 100644 index 000000000..f91a9fba3 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/create_inventory_config.yml @@ -0,0 +1,16 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + + vars: + template_name: "../templates/{{ template | default('inventory.j2') }}" + + vars_files: + - vars/main.yml + + tasks: + - name: write inventory config file + copy: + dest: ../test.aws_mq.yml + content: "{{ lookup('template', template_name) }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/empty_inventory_config.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/empty_inventory_config.yml new file mode 100644 index 000000000..6bc277e2a --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/empty_inventory_config.yml @@ -0,0 +1,9 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: write inventory config file + copy: + dest: ../test.aws_mq.yml + content: "" diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/populate_cache.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/populate_cache.yml new file mode 100644 index 000000000..dff6ede2f --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/populate_cache.yml @@ -0,0 +1,32 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + + environment: "{{ ansible_test.environment }}" + + module_defaults: + group/aws: + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: 
'{{ security_token | default(omit) }}'
+      region: '{{ aws_region }}'
+
+  collections:
+    - community.aws
+
+  vars_files:
+    - vars/main.yml
+
+  tasks:
+    - name: refresh inventory to populate cache
+      meta: refresh_inventory
+
+    - name: assert group was populated with the MQ broker
+      assert:
+        that:
+          - "'aws_mq' in groups"
+          - "groups.aws_mq | length == 1"
+
+    - name: Delete MQ instance
+      include_tasks: tasks/mq_instance_delete.yml
\ No newline at end of file diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/setup_instance.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/setup_instance.yml new file mode 100644 index 000000000..fcea9cd8c --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/setup_instance.yml @@ -0,0 +1,29 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + + vars: + env_vars: + AWS_ACCESS_KEY_ID: '{{ aws_access_key }}' + AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}' + AWS_DEFAULT_REGION: '{{ aws_region }}' + AWS_SECURITY_TOKEN: '{{ security_token }}' + + environment: "{{ ansible_test.environment | combine(env_vars) }}" + + module_defaults: + group/aws: + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + + collections: + - community.aws + + vars_files: + - vars/main.yml + + tasks: + - include_tasks: 'tasks/mq_instance_{{ operation }}.yml' diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/tasks/find_broker.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/tasks/find_broker.yml new file mode 100644 index 000000000..e5f76d0a5 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/tasks/find_broker.yml @@ -0,0 +1,10 @@ +--- +- name: Find broker by name + community.aws.mq_broker_info: + broker_name: "{{ broker_name }}" + register: broker_info + failed_when: false + +- name: Find broker by name, if exists + set_fact: + broker_exists: "{{ not (('Invalid type for parameter BrokerId, value: None' in broker_info.msg) | bool) }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/tasks/mq_instance_create.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/tasks/mq_instance_create.yml new file mode 100644 index 000000000..88f60c093 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/tasks/mq_instance_create.yml @@ -0,0 +1,27 @@ +--- +# using command module until #1832 is resolved +- include_tasks: find_broker.yml +- block: + - name: Get engine versions + command: > + aws mq describe-broker-engine-types --engine {{ engine }} + register: describe_engine_result + + - name: Select latest engine version + set_fact: + engine_version: "{{ ( describe_engine_result.stdout | from_json ).BrokerEngineTypes[0].EngineVersions | map(attribute='Name') | sort | max }}" + + - name: Create minimal MQ instance in default VPC and default subnet group + command: > + aws mq create-broker + --broker-name {{ broker_name }} + --deployment-mode SINGLE_INSTANCE + --engine-type {{ engine }} + --engine-version {{ engine_version }} + {% if resource_tags is defined %}--tags '{{ resource_tags | to_json }}'{% endif %} + --host-instance-type mq.t3.micro + --users=ConsoleAccess=True,Groups=admin,Password=aODvFQAt4tt1W,Username=master + --auto-minor-version-upgrade + --no-publicly-accessible + when: + - not broker_exists
\ No newline at end of file diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/tasks/mq_instance_delete.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/tasks/mq_instance_delete.yml new file mode 100644 index 000000000..b533ee86b --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/tasks/mq_instance_delete.yml @@ -0,0 +1,13 @@ +--- +- name: remove broker instance + community.aws.mq_broker: + state: absent + engine_type: "{{ engine }}" + broker_name: '{{ broker_name }}' + register: delete_result + failed_when: + - delete_result.get('failed',false) + - (delete_result.get('message','')).find('be deleted while in state [CREATION_IN_PROGRESS]') == -1 + until: (delete_result.get('message','')).find('be deleted while in state [CREATION_IN_PROGRESS]') == -1 + retries: 150 + delay: 60 diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_invalid_aws_mq_inventory_config.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_invalid_aws_mq_inventory_config.yml new file mode 100644 index 000000000..c982d0d9e --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_invalid_aws_mq_inventory_config.yml @@ -0,0 +1,9 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: assert inventory was not populated by aws_mq inventory plugin + assert: + that: + - "'aws_mq' not in groups" diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_cache.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_cache.yml new file mode 100644 index 000000000..8926cefa2 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_cache.yml @@ -0,0 +1,18 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: assert cache was used to populate inventory + assert: + that: + - "'aws_mq' in groups" + - "groups.aws_mq | length == 1" + + - meta: refresh_inventory + + - name: assert refresh_inventory updated the cache + assert: + that: + - "'aws_mq' in groups" + - "not groups.aws_mq" diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_no_hosts.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_no_hosts.yml new file mode 100644 index 000000000..4873adc92 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_no_hosts.yml @@ -0,0 +1,16 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + + environment: "{{ ansible_test.environment }}" + + collections: + - community.aws + tasks: + - debug: var=groups + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_mq' in groups" + - groups.aws_mq | length == 0 diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_with_hostvars_prefix_suffix.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_with_hostvars_prefix_suffix.yml new file mode 100644 index 000000000..2db7f76ab --- /dev/null +++ 
b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_with_hostvars_prefix_suffix.yml @@ -0,0 +1,30 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + + environment: "{{ ansible_test.environment }}" + + collections: + - community.aws + + vars_files: + - vars/main.yml + + tasks: + + - name: assert the hostvars are defined with prefix and/or suffix + assert: + that: + - "hostvars[broker_name][vars_prefix ~ 'host_instance_type' ~ vars_suffix] == 'mq.t3.micro'" + - "hostvars[broker_name][vars_prefix ~ 'engine_type' ~ vars_suffix] == engine" + - "hostvars[broker_name][vars_prefix ~ 'broker_state' ~ vars_suffix] in ('CREATION_IN_PROGRESS', 'RUNNING')" + - "'host_instance_type' not in hostvars[broker_name]" + - "'engine_type' not in hostvars[broker_name]" + - "'broker_state' not in hostvars[broker_name]" + - "'ansible_diff_mode' in hostvars[broker_name]" + - "'ansible_forks' in hostvars[broker_name]" + - "'ansible_version' in hostvars[broker_name]" + vars: + vars_prefix: "{{ inventory_prefix | default('') }}" + vars_suffix: "{{ inventory_suffix | default('') }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory.yml new file mode 100644 index 000000000..a71043c70 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory.yml @@ -0,0 +1,17 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + + environment: "{{ ansible_test.environment }}" + + vars_files: + - vars/main.yml + + tasks: + - name: assert aws_mq inventory group contains MQ instance created by previous playbook + assert: + that: + - "'aws_mq' in groups" + - "groups.aws_mq | length == 1" + - groups.aws_mq.0 == broker_name diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory_with_constructed.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory_with_constructed.yml new file mode 100644 index 000000000..8d840158f --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory_with_constructed.yml @@ -0,0 +1,27 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + + environment: "{{ ansible_test.environment }}" + collections: + - community.aws + + vars_files: + - vars/main.yml + + tasks: + + - debug: + var: groups + + - name: assert the keyed groups from constructed config were added to inventory + assert: + that: + # There are 5 groups: all, ungrouped, aws_mq, tag and engine_type keyed group + - "groups | length == 5" + - '"all" in groups' + - '"ungrouped" in groups' + - '"aws_mq" in groups' + - '"tag_workload_type_other" in groups' + - '"mq_ACTIVEMQ" in groups' diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/vars/main.yml b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/vars/main.yml new file mode 100644 index 000000000..2f599201c --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/playbooks/vars/main.yml @@ -0,0 +1,6 @@ +--- +broker_name: "{{ resource_prefix }}-activemq" +engine: "ACTIVEMQ" +resource_tags: + workload_type: other 
+aws_inventory_cache_dir: "" diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/runme.sh b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/runme.sh new file mode 100755 index 000000000..68a3eda4b --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/runme.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +set -eux + +function cleanup() { + ansible-playbook playbooks/setup_instance.yml -e "operation=delete" "$@" + exit 1 +} + +trap 'cleanup "${@}"' ERR + +# ensure test config is empty +ansible-playbook playbooks/empty_inventory_config.yml "$@" + +export ANSIBLE_INVENTORY_ENABLED="community.aws.aws_mq" + +# test with default inventory file +ansible-playbook playbooks/test_invalid_aws_mq_inventory_config.yml "$@" + +export ANSIBLE_INVENTORY=test.aws_mq.yml + +# test empty inventory config +ansible-playbook playbooks/test_invalid_aws_mq_inventory_config.yml "$@" + +# delete existing resources +ansible-playbook playbooks/setup_instance.yml -e "operation=delete" "$@" + +# generate inventory config and test using it +ansible-playbook playbooks/create_inventory_config.yml "$@" + +# test inventory with no hosts +ansible-playbook playbooks/test_inventory_no_hosts.yml "$@" + +# create MQ resources +ansible-playbook playbooks/setup_instance.yml -e "operation=create" "$@" + +# test inventory populated with MQ instance +ansible-playbook playbooks/test_populating_inventory.yml "$@" + +# generate inventory config with constructed features and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.j2'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@" + +# generate inventory config with hostvars_prefix features and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_prefix='aws_mq_'" "$@" +ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_prefix='aws_mq_'" "$@" + +# generate inventory config with hostvars_suffix features and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_suffix='_aws_mq'" "$@" +ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_suffix='_aws_mq'" "$@" + +# generate inventory config with hostvars_prefix and hostvars_suffix features and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_prefix='aws_'" -e "inventory_suffix='_mq'" "$@" +ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_prefix='aws_'" -e "inventory_suffix='_mq'" "$@" + +# generate inventory config with statuses and test using it +ansible-playbook playbooks/create_inventory_config.yml -e '{"inventory_statuses": true}' "$@" +ansible-playbook playbooks/test_inventory_no_hosts.yml "$@" + +# generate inventory config with caching and test using it +AWS_MQ_CACHE_DIR="aws_mq_cache_dir" +rm -rf "${AWS_MQ_CACHE_DIR}" +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.j2'" -e "aws_inventory_cache_dir=$AWS_MQ_CACHE_DIR" "$@" +ansible-playbook playbooks/populate_cache.yml "$@" +ansible-playbook playbooks/test_inventory_cache.yml "$@" +rm -rf "${AWS_MQ_CACHE_DIR}" + +# cleanup inventory config +ansible-playbook 
playbooks/empty_inventory_config.yml "$@" + +ansible-playbook playbooks/setup_instance.yml -e "operation=delete" "$@" + diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory.j2 b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory.j2 new file mode 100644 index 000000000..25fa80918 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory.j2 @@ -0,0 +1,12 @@ +plugin: community.aws.aws_mq +access_key: '{{ aws_access_key }}' +secret_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +session_token: '{{ security_token }}' +{% endif %} +regions: + - '{{ aws_region }}' +{% if inventory_statuses | default(false) %} +statuses: + - CREATION_FAILED +{% endif %} diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory_with_cache.j2 b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory_with_cache.j2 new file mode 100644 index 000000000..10941a8d5 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory_with_cache.j2 @@ -0,0 +1,11 @@ +plugin: community.aws.aws_mq +cache: True +cache_plugin: jsonfile +cache_connection: '{{ aws_inventory_cache_dir }}' +access_key: '{{ aws_access_key }}' +secret_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +session_token: '{{ security_token }}' +{% endif %} +regions: + - '{{ aws_region }}' diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory_with_constructed.j2 b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory_with_constructed.j2 new file mode 100644 index 000000000..7b421ace4 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory_with_constructed.j2 @@ -0,0 +1,13 @@ +plugin: community.aws.aws_mq +access_key: '{{ aws_access_key }}' +secret_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +session_token: '{{ security_token }}' +{% endif %} +regions: + - '{{ aws_region }}' +keyed_groups: + - key: tags + prefix: tag + - key: engine_type + prefix: mq diff --git a/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory_with_hostvars_prefix_suffix.j2 b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory_with_hostvars_prefix_suffix.j2 new file mode 100644 index 000000000..13bc6ffa8 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/inventory_aws_mq/templates/inventory_with_hostvars_prefix_suffix.j2 @@ -0,0 +1,14 @@ +plugin: community.aws.aws_mq +access_key: '{{ aws_access_key }}' +secret_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +session_token: '{{ security_token }}' +{% endif %} +regions: + - '{{ aws_region }}' +{% if inventory_prefix | default(false) %} +hostvars_prefix: '{{ inventory_prefix }}' +{% endif %} +{% if inventory_suffix | default(false) %} +hostvars_suffix: '{{ inventory_suffix }}' +{% endif %} diff --git a/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/main.yml index b6791fb06..f219f0ae6 100644 --- 
a/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/main.yml @@ -3,9 +3,9 @@ - name: 'Setup AWS Module Defaults' module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' kinesis_stream: # Number of shards is mandatory when state=present @@ -23,13 +23,13 @@ # Note: Because we're not a producer / consumer we don't actually need # access to the keys - name: 'Create KMS key 1' - aws_kms: + kms_key: alias: '{{ kms_cmk_alias_1 }}' state: present enabled: yes register: create_kms_1 - name: 'Create KMS key 2' - aws_kms: + kms_key: alias: '{{ kms_cmk_alias_2 }}' state: present enabled: yes @@ -680,7 +680,7 @@ block: - name: 'Delete the KMS keys' ignore_errors: yes - aws_kms: + kms_key: state: absent alias: '{{ item }}' loop: diff --git a/ansible_collections/community/aws/tests/integration/targets/legacy_missing_tests/aliases b/ansible_collections/community/aws/tests/integration/targets/legacy_missing_tests/aliases index 27c4351c4..edfaa127e 100644 --- a/ansible_collections/community/aws/tests/integration/targets/legacy_missing_tests/aliases +++ b/ansible_collections/community/aws/tests/integration/targets/legacy_missing_tests/aliases @@ -5,9 +5,6 @@ application_scaling_policy batch_compute_environment batch_job_definition batch_job_queue -cloudfront_distribution_info -cloudfront_invalidation -cloudfront_origin_access_identity data_pipeline directconnect_confirm_connection directconnect_connection diff --git a/ansible_collections/community/aws/tests/integration/targets/lightsail/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/lightsail/tasks/main.yml index 91f13a8ba..18e76756d 100644 --- a/ansible_collections/community/aws/tests/integration/targets/lightsail/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/lightsail/tasks/main.yml @@ -2,10 +2,10 @@ - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key | default(omit) }}' - aws_secret_key: '{{ aws_secret_key | default(omit) }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' block: @@ -15,8 +15,14 @@ lightsail: name: "{{ instance_name }}" zone: "{{ zone }}" - blueprint_id: amazon_linux + blueprint_id: amazon_linux_2 bundle_id: nano_2_0 + public_ports: + - from_port: 50 + to_port: 50 + protocol: "tcp" + cidrs: ["0.0.0.0/0"] + ipv6_cidrs: ["::/0"] wait: yes register: result @@ -25,8 +31,10 @@ - result.changed == True - "'instance' in result and result.instance.name == instance_name" - "result.instance.state.name == 'running'" + - "result.instance.networking.ports[0].from_port == 50" + - result.instance.networking.ports|length == 1 - - name: Make sure create is idempotent + - name: Check if it does not delete public ports config when no value is provided lightsail: name: "{{ instance_name }}" zone: "{{ zone }}" @@ -38,6 +46,24 @@ that: - result.changed == False + - name: Make sure create is idempotent + lightsail: + name: "{{ instance_name }}" + zone: "{{ zone }}" + blueprint_id: 
amazon_linux_2 + bundle_id: nano_2_0 + public_ports: + - from_port: 50 + to_port: 50 + protocol: "tcp" + cidrs: ["0.0.0.0/0"] + ipv6_cidrs: ["::/0"] + register: result + + - assert: + that: + - result.changed == False + - name: Start the running instance lightsail: name: "{{ instance_name }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/lightsail_snapshot/aliases b/ansible_collections/community/aws/tests/integration/targets/lightsail_snapshot/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/lightsail_snapshot/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/community/aws/tests/integration/targets/lightsail_snapshot/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/lightsail_snapshot/defaults/main.yml new file mode 100644 index 000000000..5866de4ec --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/lightsail_snapshot/defaults/main.yml @@ -0,0 +1,3 @@ +instance_name: "{{ resource_prefix }}_instance" +snapshot_name: "{{ resource_prefix }}_instance_snapshot" +zone: "{{ aws_region }}a" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/lightsail_snapshot/meta/main.yml index 32cf5dda7..32cf5dda7 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/lightsail_snapshot/meta/main.yml diff --git a/ansible_collections/community/aws/tests/integration/targets/lightsail_snapshot/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/lightsail_snapshot/tasks/main.yml new file mode 100644 index 000000000..98553d278 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/lightsail_snapshot/tasks/main.yml @@ -0,0 +1,85 @@ +--- + +- module_defaults: + group/aws: + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + + block: + + # ==== Tests =================================================== + + - name: Create a new instance + lightsail: + name: "{{ instance_name }}" + zone: "{{ zone }}" + blueprint_id: amazon_linux_2 + bundle_id: nano_2_0 + wait: yes + + - name: Create a new snapshot + lightsail_snapshot: + snapshot_name: "{{ snapshot_name }}" + instance_name: "{{ instance_name }}" + region: "{{ aws_region }}" + wait: yes + register: result + + - assert: + that: + - result.changed == True + - "'instance_snapshot' in result and result.instance_snapshot.name == snapshot_name" + - "result.instance_snapshot.state == 'available'" + + - name: Make sure instance snapshot creation is idempotent + lightsail_snapshot: + snapshot_name: "{{ snapshot_name }}" + instance_name: "{{ instance_name }}" + region: "{{ aws_region }}" + wait: yes + register: result + + - assert: + that: + - result.changed == False + + - name: Delete the instance snapshot + lightsail_snapshot: + snapshot_name: "{{ snapshot_name }}" + region: "{{ aws_region }}" + state: absent + register: result + + - assert: + that: + - result.changed == True + + - name: Make sure instance snapshot deletion is idempotent + lightsail_snapshot: + snapshot_name: "{{ snapshot_name }}" + region: "{{ aws_region }}" + state: absent + register: result + + - assert: + that: + - result.changed == False + + # ==== Cleanup 
==================================================== + + always: + + - name: Cleanup - delete instance snapshot + lightsail_snapshot: + snapshot_name: "{{ snapshot_name }}" + region: "{{ aws_region }}" + state: absent + ignore_errors: yes + + - name: Cleanup - delete instance + lightsail: + name: "{{ instance_name }}" + state: absent + ignore_errors: yes diff --git a/ansible_collections/community/aws/tests/integration/targets/lightsail_static_ip/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/lightsail_static_ip/tasks/main.yml index f8f327344..e0b452f3e 100644 --- a/ansible_collections/community/aws/tests/integration/targets/lightsail_static_ip/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/lightsail_static_ip/tasks/main.yml @@ -2,10 +2,10 @@ - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key | default(omit) }}' - aws_secret_key: '{{ aws_secret_key | default(omit) }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' block: diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/aliases b/ansible_collections/community/aws/tests/integration/targets/mq/aliases new file mode 100644 index 000000000..fef8ae9bd --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/mq/aliases @@ -0,0 +1,13 @@ +# reason: missing-policy +# We don't have CI or 'unsupported' policy for Amazon MQ, yet +# reason: slow +# tests run about 30 minutes +unsupported + +cloud/aws + +mq_broker_info +mq_broker +mq_broker_config +mq_user_info +mq_user diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/mq/defaults/main.yml new file mode 100644 index 000000000..2199c2f63 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/mq/defaults/main.yml @@ -0,0 +1,9 @@ +--- +# default files for mq_* +broker_name: '{{resource_prefix}}-mq' +vpc_name: "{{ resource_prefix }}-vpc" +vpc_cidr: "10.0.0.0/16" +subnet_cidr: "10.0.1.0/24" +sg_name: "{{resource_prefix}}-sg" +tags: + workload_type: other
\ No newline at end of file diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/files/broker_cfg.1.xml b/ansible_collections/community/aws/tests/integration/targets/mq/files/broker_cfg.1.xml new file mode 100644 index 000000000..0fdc98e46 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/mq/files/broker_cfg.1.xml @@ -0,0 +1,17 @@ +<?xml version="1.0" encoding="UTF-8" standalone="yes"?> +<broker schedulePeriodForDestinationPurge="10000" xmlns="http://activemq.apache.org/schema/core"> + <!-- update 1 --> + <destinationPolicy> + <policyMap> + <policyEntries> + <policyEntry gcInactiveDestinations="true" inactiveTimoutBeforeGC="600000" topic=">"> + <pendingMessageLimitStrategy> + <constantPendingMessageLimitStrategy limit="1000"/> + </pendingMessageLimitStrategy> + </policyEntry> + <policyEntry gcInactiveDestinations="true" inactiveTimoutBeforeGC="600000" queue=">"/> + </policyEntries> + </policyMap> + </destinationPolicy> + <plugins/> +</broker> diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/files/broker_cfg.1a.xml b/ansible_collections/community/aws/tests/integration/targets/mq/files/broker_cfg.1a.xml new file mode 100644 index 000000000..b374d1357 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/mq/files/broker_cfg.1a.xml @@ -0,0 +1,21 @@ +<?xml version="1.0" encoding="UTF-8" standalone="yes"?> +<broker schedulePeriodForDestinationPurge="10000" xmlns="http://activemq.apache.org/schema/core"> + <!-- update 1 --> + + <destinationPolicy> + <policyMap> + <policyEntries> + <policyEntry gcInactiveDestinations="true" inactiveTimoutBeforeGC="600000" topic=">"> + <pendingMessageLimitStrategy> + <constantPendingMessageLimitStrategy limit="1000"/> + </pendingMessageLimitStrategy> + </policyEntry> + <policyEntry gcInactiveDestinations="true" inactiveTimoutBeforeGC="600000" queue=">"/> + </policyEntries> + </policyMap> + </destinationPolicy> + <plugins/> + +</broker> + + diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/files/broker_cfg.2.xml b/ansible_collections/community/aws/tests/integration/targets/mq/files/broker_cfg.2.xml new file mode 100644 index 000000000..0d10ebdc6 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/mq/files/broker_cfg.2.xml @@ -0,0 +1,17 @@ +<?xml version="1.0" encoding="UTF-8" standalone="yes"?> +<broker schedulePeriodForDestinationPurge="10000" xmlns="http://activemq.apache.org/schema/core"> + <!-- update 2 --> + <destinationPolicy> + <policyMap> + <policyEntries> + <policyEntry gcInactiveDestinations="true" inactiveTimoutBeforeGC="600000" topic=">"> + <pendingMessageLimitStrategy> + <constantPendingMessageLimitStrategy limit="1000"/> + </pendingMessageLimitStrategy> + </policyEntry> + <policyEntry gcInactiveDestinations="true" inactiveTimoutBeforeGC="600000" queue=">"/> + </policyEntries> + </policyMap> + </destinationPolicy> + <plugins/> +</broker> diff --git a/ansible_collections/community/aws/tests/integration/targets/aws_region_info/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/mq/meta/main.yml index 32cf5dda7..32cf5dda7 100644 --- a/ansible_collections/community/aws/tests/integration/targets/aws_region_info/meta/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/mq/meta/main.yml diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_cleanup.yml 
b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_cleanup.yml
new file mode 100644
index 000000000..9507f99fa
--- /dev/null
+++ b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_cleanup.yml
@@ -0,0 +1,17 @@
+- name: cleanup broker
+  mq_broker:
+    broker_name: "{{ broker_name }}"
+    state: "absent"
+  ignore_errors: true
+  when: not ansible_check_mode
+# we need to wait - otherwise env_cleanup.yml will fail
+- name: wait until broker deletion is finished
+  mq_broker_info:
+    broker_id: "{{ broker_id }}"
+  register: result
+  # the condition will never be met - instead it will fail in the end
+  until: result.broker['broker_state'] != 'DELETION_IN_PROGRESS'
+  retries: 15
+  delay: 60
+  ignore_errors: true
+  when: not ansible_check_mode
diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_config_tests.yml b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_config_tests.yml
new file mode 100644
index 000000000..31c67438b
--- /dev/null
+++ b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_config_tests.yml
@@ -0,0 +1,82 @@
+- name: get broker details
+  mq_broker_info:
+    broker_id: "{{ broker_id }}"
+  register: result
+- name: verify test broker is running
+  assert:
+    fail_msg: "broker with id {{ broker_id }} is not in RUNNING state"
+    that:
+      - result.broker['broker_state'] == 'RUNNING'
+  when: not ansible_check_mode
+- name: test 1 - send update to broker config
+  mq_broker_config:
+    broker_id: "{{ broker_id }}"
+    config_xml: "{{ lookup('file', '../files/broker_cfg.1.xml')}}"
+  register: result
+- name: verify test1
+  assert:
+    fail_msg: test1 failed
+    that:
+      - result.changed | bool
+      - result.broker['broker_id'] == broker_id
+      - result.configuration['id'] == result.broker['configurations']['pending']['id']
+      - result.configuration['revision'] == result.broker['configurations']['pending']['revision']
+  when: not ansible_check_mode
+- name: test 1a - send same config again
+  mq_broker_config:
+    broker_id: "{{ broker_id }}"
+    config_xml: "{{ lookup('file', '../files/broker_cfg.1.xml')}}"
+  register: result
+- name: verify test1a
+  assert:
+    fail_msg: test1a failed
+    that:
+      - not (result.changed | bool )
+  when: not ansible_check_mode
+- name: test 2 - send (almost) same config again - differs by whitespace
+  mq_broker_config:
+    broker_id: "{{ broker_id }}"
+    config_xml: "{{ lookup('file', '../files/broker_cfg.1a.xml')}}"
+  register: result
+- name: verify test2
+  assert:
+    fail_msg: test2 failed
+    that:
+      - not (result.changed | bool )
+  when: not ansible_check_mode
+- name: test 3 - send new config with custom description and request reboot
+  mq_broker_config:
+    broker_id: "{{ broker_id }}"
+    config_xml: "{{ lookup('file', '../files/broker_cfg.2.xml')}}"
+    config_description: "test 3 used custom description"
+    reboot: true
+  register: result
+- name: verify test3
+  assert:
+    fail_msg: test3 failed
+    that:
+      - result.changed | bool
+      - result.broker['broker_state'] == 'REBOOT_IN_PROGRESS'
+  when: not ansible_check_mode
+- name: wait for reboot
+  mq_broker_info:
+    broker_id: "{{ broker_id }}"
+  register: result
+  until: result.broker['broker_state'] == 'RUNNING'
+  retries: 15
+  delay: 60
+  when: not ansible_check_mode
+- name: test 3a - send new config again
+  mq_broker_config:
+    broker_id: "{{ broker_id }}"
+    config_xml: "{{ lookup('file', '../files/broker_cfg.2.xml')}}"
+    config_description: "test 3 used custom description"
+    reboot: true
+  register: result
+- name: verify test3a + assert: + fail_msg: test3a failed + that: + - not (result.changed | bool ) + when: not ansible_check_mode +# Note: currently there's no way to delete a broker configuration (version) diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_delete_tests.yml b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_delete_tests.yml new file mode 100644 index 000000000..bde36cd13 --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_delete_tests.yml @@ -0,0 +1,43 @@ +- name: delete broker + mq_broker: + broker_name: "{{ broker_name }}" + state: "absent" + register: result +- name: verify broker delete + assert: + fail_msg: broker delete failed + that: + - ( result.changed | bool) + when: not ansible_check_mode +- name: get details after delete + mq_broker_info: + broker_name: "{{ broker_name }}" + register: result_d1 +- name: verify broker deletion on progress + assert: + fail_msg: broker delete too fast? + that: + - result_d1.broker['broker_state'] == 'DELETION_IN_PROGRESS' + when: not ansible_check_mode +- name: repeat broker deletion + mq_broker: + broker_name: "{{ broker_name }}" + state: "absent" + register: result +- name: verify broker repeated delete + assert: + fail_msg: didn't detect DELETION_IN_PROGRESS in progress + that: + - not ( result.changed | bool) + when: not ansible_check_mode +- name: deletion unknown broker - simulates re-deletion of completely deleted broker + mq_broker: + broker_name: "{{ broker_name }}__unknown_broker__" + state: "absent" + register: result +- name: verify delete unknown broker + assert: + fail_msg: deletion of unknown broker return unexpected result + that: + - not ( result.changed | bool) + when: not ansible_check_mode diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_tests.yml b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_tests.yml new file mode 100644 index 000000000..515306abf --- /dev/null +++ b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_tests.yml @@ -0,0 +1,120 @@ +- name: create broker with minimal parameters + mq_broker: + broker_name: "{{ broker_name }}" + security_groups: "{{ broker_sg_ids.split(',') }}" + subnet_ids: "{{ broker_subnet_ids.split(',') }}" + tags: "{{ tags }}" + wait: true + register: result +- set_fact: + broker_id: "{{ result.broker['broker_id'] }}" +- name: get broker details by id + mq_broker_info: + broker_id: "{{ broker_id }}" + register: result_c1 +- name: verify creation result + assert: + fail_msg: broker creation failed + that: + # change state is from previous operation: + - ( result.changed | bool ) + - result_c1.broker['broker_id'] == broker_id + - result_c1.broker['broker_name'] == broker_name + - result_c1.broker['broker_state'] == 'RUNNING' + - ( result_c1.broker['storage_type'] | upper ) == 'EFS' + - result_c1.broker['tags'] == tags + when: not ansible_check_mode +- name: repeat creation + mq_broker: + broker_name: "{{ broker_name }}" + security_groups: "{{ broker_sg_ids.split(',') }}" + subnet_ids: "{{ broker_subnet_ids.split(',') }}" + register: result +- set_fact: + broker_id: "{{ result.broker['broker_id'] }}" +- name: get broker details - this time by name + mq_broker_info: + broker_name: "{{ broker_name }}" + register: result_c2 +- name: verify broker re-creation + assert: + fail_msg: broker re-creation failed + that: + # change state is from previous operation: + - not ( 
diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_tests.yml b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_tests.yml
new file mode 100644
index 000000000..515306abf
--- /dev/null
+++ b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_tests.yml
@@ -0,0 +1,120 @@
+- name: create broker with minimal parameters
+  mq_broker:
+    broker_name: "{{ broker_name }}"
+    security_groups: "{{ broker_sg_ids.split(',') }}"
+    subnet_ids: "{{ broker_subnet_ids.split(',') }}"
+    tags: "{{ tags }}"
+    wait: true
+  register: result
+- set_fact:
+    broker_id: "{{ result.broker['broker_id'] }}"
+- name: get broker details by id
+  mq_broker_info:
+    broker_id: "{{ broker_id }}"
+  register: result_c1
+- name: verify creation result
+  assert:
+    fail_msg: broker creation failed
+    that:
+      # change state is from previous operation:
+      - ( result.changed | bool )
+      - result_c1.broker['broker_id'] == broker_id
+      - result_c1.broker['broker_name'] == broker_name
+      - result_c1.broker['broker_state'] == 'RUNNING'
+      - ( result_c1.broker['storage_type'] | upper ) == 'EFS'
+      - result_c1.broker['tags'] == tags
+  when: not ansible_check_mode
+- name: repeat creation
+  mq_broker:
+    broker_name: "{{ broker_name }}"
+    security_groups: "{{ broker_sg_ids.split(',') }}"
+    subnet_ids: "{{ broker_subnet_ids.split(',') }}"
+  register: result
+- set_fact:
+    broker_id: "{{ result.broker['broker_id'] }}"
+- name: get broker details - this time by name
+  mq_broker_info:
+    broker_name: "{{ broker_name }}"
+  register: result_c2
+- name: verify broker re-creation
+  assert:
+    fail_msg: broker re-creation failed
+    that:
+      # change state is from previous operation:
+      - not ( result.changed | bool)
+      - result_c2.broker['broker_id'] == broker_id
+      - result_c2.broker['broker_name'] == broker_name
+      - ( result_c2.broker['storage_type'] | upper ) == 'EFS'
+  when: not ansible_check_mode
+- name: update broker
+  mq_broker:
+    broker_name: "{{ broker_name }}"
+    auto_minor_version_upgrade: false
+    storage_type: EBS
+  register: result
+- name: verify broker update
+  assert:
+    fail_msg: broker update failed
+    that:
+      - ( result.changed | bool)
+      - result.broker['broker_id'] == broker_id
+  when: not ansible_check_mode
+- name: reboot broker to make pending changes active
+  mq_broker:
+    broker_name: "{{ broker_name }}"
+    state: "restarted"
+  register: result
+- name: get broker details by id
+  mq_broker_info:
+    broker_id: "{{ broker_id }}"
+  register: result_r1
+- name: check for pending reboot
+  assert:
+    fail_msg: trigger reboot failed
+    that:
+      - result.changed | bool
+      - result_r1.broker['broker_state'] == 'REBOOT_IN_PROGRESS'
+  when: not ansible_check_mode
+- debug:
+    msg: "Wait until reboot of broker {{ broker_name }} ({{ broker_id }}) is finished. This may take several minutes"
+- name: wait for reboot
+  mq_broker_info:
+    broker_id: "{{ broker_id }}"
+  register: result
+  until: result.broker['broker_state'] == 'RUNNING'
+  retries: 15
+  delay: 60
+  when: not ansible_check_mode
+- name: get details after update
+  mq_broker_info:
+    broker_name: "{{ broker_name }}"
+  register: result_u1
+- name: verify broker update
+  assert:
+    fail_msg: broker update failed
+    that:
+      - result_u1.broker['broker_id'] == broker_id
+      - result_u1.broker['broker_name'] == broker_name
+      - not ( result_u1.broker['auto_minor_version_upgrade'] | bool )
+      # the next one checks that changes to create-only parameters are silently ignored
+      - result_u1.broker['storage_type'] == result_c1.broker['storage_type']
+  when: not ansible_check_mode
+- name: repeat update broker
+  mq_broker:
+    broker_name: "{{ broker_name }}"
+    auto_minor_version_upgrade: false
+    storage_type: EBS
+  register: result
+- name: get details after re-update
+  mq_broker_info:
+    broker_name: "{{ broker_name }}"
+  register: result_u2
+- name: verify broker re-update
+  assert:
+    fail_msg: broker update failed
+    that:
+      - not ( result.changed | bool)
+      - result_u2.broker['broker_id'] == result_u1.broker['broker_id']
+      - result_u2.broker['storage_type'] == result_u1.broker['storage_type']
+      - result_u2.broker['engine_version'] == result_u1.broker['engine_version']
+  when: not ansible_check_mode
diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_user_info_tests.yml b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_user_info_tests.yml
new file mode 100644
index 000000000..427e272b6
--- /dev/null
+++ b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_user_info_tests.yml
@@ -0,0 +1,65 @@
+- name: set test data
+  set_fact:
+    create_users:
+      - "info_user1"
+      - "info_user2"
+      - "info_user3"
+      - "info_user4"
+      - "info_user5"
+    delete_users:
+      - "info_user2"
+      - "info_user5"
+- name: prepare tests - create users
+  mq_user:
+    state: present
+    broker_id: "{{ broker_id }}"
+    username: "{{ item }}"
+  loop: "{{ create_users | flatten(levels=1) }}"
+- name: prepare tests - delete users
+  mq_user:
+    state: absent
+    broker_id: "{{ broker_id }}"
+    username: "{{ item }}"
+  loop: "{{ delete_users | flatten(levels=1) }}"
+- name: test2 - list all users
+  mq_user_info:
+    broker_id: "{{ broker_id }}"
+  register: result
+- name: test2 - verify
+  assert:
+    fail_msg: test2 failed
+    that:
+      - result.users['info_user1']
+      - result.users['info_user2']
+      - result.users['info_user3']
+  when: not ansible_check_mode
+- name: test3 - list only users currently active until next broker reboot
+  mq_user_info:
+    broker_id: "{{ broker_id }}"
+    skip_pending_create: true
+  register: result
+- name: test3 - verify
+  assert:
+    fail_msg: test3 failed
+    that:
+      - not ('info_user1' in result.users)
+      - result.users['info_user2']
+      - not ('info_user3' in result.users)
+      - not ('info_user4' in result.users)
+      - result.users['info_user5']
+  when: not ansible_check_mode
+- name: test4 - list only users that will be active after next broker reboot
+  mq_user_info:
+    broker_id: "{{ broker_id }}"
+    skip_pending_delete: true
+  register: result
+- name: test4 - verify
+  assert:
+    fail_msg: test4 failed
+    that:
+      - result.users['info_user1']
+      - not ('info_user2' in result.users)
+      - result.users['info_user3']
+      - result.users['info_user4']
+      - not ('info_user5' in result.users)
+  when: not ansible_check_mode
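Tests 3 and 4 above pin down how mq_user_info treats pending changes: users created through the API stay "pending create" and deleted users stay "pending delete" until the broker is rebooted. A short sketch of how the two skip flags select between the current view and the post-reboot view (usernames illustrative, not from the patch):

- name: users effective right now
  mq_user_info:
    broker_id: "{{ broker_id }}"
    skip_pending_create: true   # hides a just-created user, still shows one pending delete
  register: active_now

- name: users effective after the next reboot
  mq_user_info:
    broker_id: "{{ broker_id }}"
    skip_pending_delete: true   # shows the pending create, hides the pending delete
  register: active_after_reboot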
diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_user_tests.yml b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_user_tests.yml
new file mode 100644
index 000000000..6a30c694b
--- /dev/null
+++ b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/broker_user_tests.yml
@@ -0,0 +1,173 @@
+- name: set test data
+  set_fact:
+    usernames:
+      - "test_user1"
+      - "test_user2"
+      - "test_user3"
+
+- name: test1 - create user with default settings
+  mq_user:
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[0] }}"
+  register: result
+- name: test1 - verify
+  assert:
+    fail_msg: test1 failed
+    that:
+      - result.changed | bool
+      - result.user['username'] == usernames[0]
+      - not (result.user['pending']['console_access'] | bool)
+      - result.user['pending']['groups'] | length == 0
+  when: not ansible_check_mode
+- name: test2 - create user with console access and group list
+  mq_user:
+    state: present
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[1] }}"
+    console_access: true
+    groups: [ "g1", "g2" ]
+  register: result
+- name: test2 - verify
+  assert:
+    fail_msg: test2 failed
+    that:
+      - result.changed | bool
+      - result.user['username'] == usernames[1]
+      - result.user['pending']['console_access'] | bool
+      - result.user['pending']['groups'] | length == 2
+  when: not ansible_check_mode
+- name: test3 - create user with defined password
+  mq_user:
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[2] }}"
+    password: "09234092jzxkjvjk23kn23qn5lk34"
+  register: result
+- name: test3 - verify
+  assert:
+    fail_msg: test3 failed
+    that:
+      - result.changed | bool
+      - result.user['username'] == usernames[2]
+      - not (result.user['pending']['console_access'] | bool)
+      - result.user['pending']['groups'] | length == 0
+  when: not ansible_check_mode
+- name: test4 - update user password - ignore mode
+  mq_user:
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[2] }}"
+    password: "new_password_ignored"
+  register: result
+- name: test4 - verify
+  assert:
+    fail_msg: test4 failed
+    that:
+      - not (result.changed | bool)
+  when: not ansible_check_mode
+- name: test5 - update user password - force mode
+  mq_user:
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[2] }}"
+    password: "new_Password_Accepted0815%"
+    allow_pw_update: true
+  register: result
+- name: test5 - verify
+  assert:
+    fail_msg: test5 failed
+    that:
+      - result.changed | bool
+  when: not ansible_check_mode
+- name: test6 - update console access - same value
+  mq_user:
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[2] }}"
+    console_access: false
+  register: result
+- name: test6 - verify
+  assert:
+    fail_msg: test6 failed
+    that:
+      - not (result.changed | bool)
+  when: not ansible_check_mode
+- name: test7 - update console access - new value
+  mq_user:
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[1] }}"
+    console_access: false
+  register: result
+- name: test7 - verify
+  assert:
+    fail_msg: test7 failed
+    that:
+      - result.changed | bool
+      - not( result.user['pending']['console_access'] | bool )
+      - result.user['pending']['groups'] | length == 2
+  when: not ansible_check_mode
+- name: test8 - update group list - same list but different order
+  mq_user:
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[1] }}"
+    groups: [ "g2", "g1" ]
+  register: result
+- name: test8 - verify
+  assert:
+    fail_msg: test8 failed
+    that:
+      - not (result.changed | bool)
+  when: not ansible_check_mode
+- name: test9 - update group list - add element
+  mq_user:
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[1] }}"
+    groups: [ "g2", "g1", "g3" ]
+  register: result
+- name: test9 - verify
+  assert:
+    fail_msg: test9 failed
+    that:
+      - result.changed | bool
+      - result.user['pending']['groups'] | length == 3
+  when: not ansible_check_mode
+- name: test10 - update group list - remove element
+  mq_user:
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[1] }}"
+    groups: [ "g2", "g3" ]
+  register: result
+- name: test10 - verify
+  assert:
+    fail_msg: test10 failed
+    that:
+      - result.changed | bool
+      - result.user['pending']['groups'] | length == 2
+  when: not ansible_check_mode
+- name: test11 - update group list - set to empty list
+  mq_user:
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[1] }}"
+    groups: []
+  register: result
+- name: test11 - verify
+  assert:
+    fail_msg: test11 failed
+    that:
+      - result.changed | bool
+      - result.user['pending']['groups'] | length == 0
+  when: not ansible_check_mode
+- name: delete all users
+  mq_user:
+    state: absent
+    broker_id: "{{ broker_id }}"
+    username: "{{ item }}"
+  loop: "{{ usernames | flatten(levels=1) }}"
+- name: test13 - delete already deleted user
+  mq_user:
+    state: absent
+    broker_id: "{{ broker_id }}"
+    username: "{{ usernames[1] }}"
+  register: result
+- name: test13 - verify
+  assert:
+    fail_msg: test13 failed
+    that:
+      - not(result.changed | bool)
+  when: not ansible_check_mode
diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/tasks/env_cleanup.yml b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/env_cleanup.yml
new file mode 100644
index 000000000..0ccb37907
--- /dev/null
+++ b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/env_cleanup.yml
@@ -0,0 +1,33 @@
+- name: remove the security group
+  ec2_security_group:
+    name: "{{ sg_name }}"
+    description: a security group for ansible tests
+    vpc_id: "{{ testing_vpc.vpc.id }}"
+    state: absent
+  register: removed
+  until: removed is not failed
+  ignore_errors: yes
+  retries: 10
+
+- name: remove subnet A
+  ec2_vpc_subnet:
+    state: absent
+    vpc_id: "{{ testing_vpc.vpc.id }}"
+    cidr: "{{ subnet_cidr }}"
+  register: removed
+  until: removed is not failed
+  ignore_errors: yes
+  retries: 10
+
+- name: remove the VPC
+  ec2_vpc_net:
+    name: "{{ vpc_name }}"
+    cidr_block: "{{ vpc_cidr }}"
+    state: absent
+    tags:
+      Name: Ansible Testing VPC
+    tenancy: default
+  register: removed
+  until: removed is not failed
+  ignore_errors: yes
+  retries: 10
diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/tasks/env_setup.yml b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/env_setup.yml
new file mode 100644
index 000000000..e27b66f27
--- /dev/null
+++ b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/env_setup.yml
@@ -0,0 +1,25 @@
+- name: Create VPC for use in testing
+  ec2_vpc_net:
+    name: "{{ vpc_name }}"
+    cidr_block: "{{ vpc_cidr }}"
+    tags:
+      Name: Ansible ec2_instance Testing VPC
+    tenancy: default
+  register: testing_vpc
+
+- name: Create subnet in zone A
+  ec2_vpc_subnet:
+    state: present
+    vpc_id: "{{ testing_vpc.vpc.id }}"
+    cidr: "{{ subnet_cidr }}"
+    az: "{{ aws_region }}a"
+    resource_tags:
+      Name: "{{ resource_prefix }}-subnet-a"
+  register: testing_subnet_a
+
+- name: create a security group with the vpc
+  ec2_security_group:
+    name: "{{ sg_name }}"
+    description: a security group for ansible tests
+    vpc_id: "{{ testing_vpc.vpc.id }}"
+  register: testing_sg
diff --git a/ansible_collections/community/aws/tests/integration/targets/mq/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/main.yml
new file mode 100644
index 000000000..e84367a76
--- /dev/null
+++ b/ansible_collections/community/aws/tests/integration/targets/mq/tasks/main.yml
@@ -0,0 +1,35 @@
+---
+- name: run amazon MQ tests
+  module_defaults:
+    group/aws:
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
+      aws_region: "{{ aws_region }}"
+  collections:
+    - amazon.aws
+
+  block:
+    - name: set up environment for testing
+      include_tasks: env_setup.yml
+    - name: set subnet and security group
+      set_fact:
+        broker_subnet_ids: "{{ testing_subnet_a.subnet.id }}"
+        broker_sg_ids: "{{ testing_sg.group_id }}"
+    - name: run broker tests
+      include_tasks: broker_tests.yml
+    # re-use broker_id for other tests
+    - name: run broker config tests
+      include_tasks: broker_config_tests.yml
+    - name: run broker user tests
+      include_tasks: broker_user_tests.yml
+    - name: run broker user info tests
+      include_tasks: broker_user_info_tests.yml
+    - name: run broker delete tests
+      include_tasks: broker_delete_tests.yml
+
+  always:
+    - name: cleanup broker
+      include_tasks: broker_cleanup.yml
+
+    - include_tasks: env_cleanup.yml
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/main.yml b/ansible_collections/community/aws/tests/integration/targets/mq/vars/main.yml
index ed97d539c..ed97d539c 100644
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/mq/vars/main.yml
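Most of the hunks that follow apply the same two mechanical changes across targets: the deprecated group/aws credential defaults aws_access_key/aws_secret_key/security_token are renamed to access_key/secret_key/session_token, and the aws_msk_* module names lose their aws_ prefix. The resulting defaults block, as it appears throughout the rest of the patch:

- module_defaults:
    group/aws:
      access_key: "{{ aws_access_key }}"
      secret_key: "{{ aws_secret_key }}"
      session_token: "{{ security_token | default(omit) }}"
      region: "{{ aws_region }}"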
diff --git a/ansible_collections/community/aws/tests/integration/targets/msk_cluster-auth/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/msk_cluster-auth/tasks/main.yml
index 5a6487607..9ed2e92d5 100644
--- a/ansible_collections/community/aws/tests/integration/targets/msk_cluster-auth/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/msk_cluster-auth/tasks/main.yml
@@ -2,9 +2,9 @@
 - name: aws_msk_cluster integration tests
   module_defaults:
     group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
   collections:
     - amazon.aws
@@ -42,7 +42,7 @@
 
     # ============================================================
     - name: create msk configuration
-      aws_msk_config:
+      msk_config:
         name: "{{ msk_config_name }}"
         state: "present"
         kafka_versions:
@@ -55,14 +55,14 @@
 
   always:
     - name: delete msk cluster
-      aws_msk_cluster:
+      msk_cluster:
        name: "{{ msk_cluster_name }}"
        state: absent
        wait: true
      ignore_errors: yes
 
    - name: remove msk configuration
-      aws_msk_config:
+      msk_config:
        name: "{{ msk_config_name }}"
        state: absent
      ignore_errors: yes
diff --git a/ansible_collections/community/aws/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml b/ansible_collections/community/aws/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml
index d7cdd3a71..9535c235f 100644
--- a/ansible_collections/community/aws/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml
@@ -1,6 +1,6 @@
 ---
 - name: create a msk cluster with authentication flipped from default (check mode)
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "present"
     version: "{{ msk_version }}"
@@ -24,7 +24,7 @@
     - msk_cluster is changed
 
 - name: create a msk cluster with authentication flipped from default
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "present"
     version: "{{ msk_version }}"
@@ -62,10 +62,10 @@
       # Not always returned by API
       # - "msk_cluster.cluster_info.client_authentication.unauthenticated.enabled == false"
       - "msk_cluster.cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker == false"
-      - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:{{ aws_region }}:')"
+      - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:' ~ aws_region ~ ':')"
 
 - name: create a msk cluster with authentication flipped from default (idempotency)
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "present"
     version: "{{ msk_version }}"
@@ -89,7 +89,7 @@
 
 ### Keep delete simple as we're not checking delete here
 - name: delete msk cluster
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "absent"
     wait: true
diff --git a/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/main.yml
index a3049dad0..6425d7ec7 100644
--- a/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/main.yml
@@ -2,9 +2,9 @@
 - name: aws_msk_cluster integration tests
   module_defaults:
     group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
   collections:
     - amazon.aws
@@ -42,7 +42,7 @@
 
     # ============================================================
     - name: create msk configuration
-      aws_msk_config:
+      msk_config:
         name: "{{ msk_config_name }}"
         state: "present"
         kafka_versions:
@@ -61,14 +61,14 @@
 
   always:
     - name: delete msk cluster
-      aws_msk_cluster:
+      msk_cluster:
        name: "{{ msk_cluster_name }}"
        state: absent
        wait: true
      ignore_errors: yes
 
    - name: remove msk configuration
-      aws_msk_config:
+      msk_config:
        name: "{{ msk_config_name }}"
        state: absent
      ignore_errors: yes
diff --git a/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_create.yml b/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_create.yml
index 4fd7073cc..f6845059f 100644
--- a/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_create.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_create.yml
@@ -1,6 +1,6 @@
 ---
 - name: create msk cluster (check mode)
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "present"
     version: "{{ msk_version }}"
@@ -20,7 +20,7 @@
     - msk_cluster is changed
 
 - name: create msk cluster
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "present"
     version: "{{ msk_version }}"
@@ -50,10 +50,10 @@
     - "msk_cluster.cluster_info.broker_node_group_info.instance_type == 'kafka.t3.small'"
     - "msk_cluster.cluster_info.broker_node_group_info.storage_info.ebs_storage_info.volume_size == 10"
     - "msk_cluster.cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker == false"
-    - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:{{ aws_region }}:')"
+    - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:' ~ aws_region ~ ':')"
 
 - name: create msk cluster (idempotency)
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "present"
     version: "{{ msk_version }}"
diff --git a/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_delete.yml b/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_delete.yml
index efd90fa14..53a0d7c8f 100644
--- a/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_delete.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_delete.yml
@@ -1,6 +1,6 @@
 ---
 - name: delete msk cluster (check mode)
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "absent"
     wait: true
@@ -13,7 +13,7 @@
     - msk_cluster is changed
 
 - name: delete msk cluster
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "absent"
     wait: true
@@ -25,7 +25,7 @@
     - msk_cluster is changed
 
 - name: delete msk cluster (idempotency)
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "absent"
     wait: true
diff --git a/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_update.yml b/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_update.yml
index 50ac91718..600d8eb59 100644
--- a/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_update.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/msk_cluster/tasks/test_update.yml
@@ -1,6 +1,6 @@
 ---
 - name: update msk cluster (check mode)
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "present"
     version: "{{ msk_version }}"
@@ -22,7 +22,7 @@
     - msk_cluster is changed
 
 - name: update msk cluster
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "present"
     version: "{{ msk_version }}"
@@ -51,7 +51,7 @@
     - "msk_cluster.cluster_info.tags.key3 == 'value3'"
 
 - name: update msk cluster (idempotency)
-  aws_msk_cluster:
+  msk_cluster:
     name: "{{ msk_cluster_name }}"
     state: "present"
     version: "{{ msk_version }}"
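The cluster_arn assertions are reworked at the same time: embedding {{ aws_region }} inside an assert expression nests a Jinja template within a conditional, which newer ansible-core releases warn about; concatenating with the ~ operator keeps everything in a single expression. The two forms side by side, taken from the hunks above:

# before: template nested inside the conditional string
- assert:
    that:
      - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:{{ aws_region }}:')"

# after: plain Jinja concatenation, evaluated once
- assert:
    that:
      - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:' ~ aws_region ~ ':')"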
diff --git a/ansible_collections/community/aws/tests/integration/targets/msk_config/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/msk_config/tasks/main.yml
index cef9e1dfc..5f7f6c782 100644
--- a/ansible_collections/community/aws/tests/integration/targets/msk_config/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/msk_config/tasks/main.yml
@@ -2,15 +2,15 @@
 - name: tests for community.aws.aws_msk_config
   module_defaults:
     group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
   collections:
     - amazon.aws
   block:
     - name: create msk configuration (check mode)
-      aws_msk_config:
+      msk_config:
         name: "{{ msk_config_name }}"
         state: "present"
         kafka_versions: "{{ msk_kafka_versions }}"
@@ -24,7 +24,7 @@
         - msk_config is changed
 
     - name: create msk configuration
-      aws_msk_config:
+      msk_config:
         name: "{{ msk_config_name }}"
         state: "present"
         kafka_versions: "{{ msk_kafka_versions }}"
@@ -37,7 +37,7 @@
         - msk_config is changed
 
     - name: create msk configuration (idempotency)
-      aws_msk_config:
+      msk_config:
         name: "{{ msk_config_name }}"
         state: "present"
         kafka_versions: "{{ msk_kafka_versions }}"
@@ -53,12 +53,12 @@
       assert:
         that:
           - msk_config.revision == 1
-          - "msk_config.arn.startswith('arn:aws:kafka:{{ aws_region }}:')"
+          - "msk_config.arn.startswith('arn:aws:kafka:' ~ aws_region ~ ':')"
           - "'auto.create.topics.enable=True' in msk_config.server_properties"
           - "'zookeeper.session.timeout.ms=18000' in msk_config.server_properties"
 
     - name: update msk configuration (check mode)
-      aws_msk_config:
+      msk_config:
         name: "{{ msk_config_name }}"
         state: "present"
         kafka_versions: "{{ msk_kafka_versions }}"
@@ -72,7 +72,7 @@
         - msk_config is changed
 
     - name: update msk configuration
-      aws_msk_config:
+      msk_config:
         name: "{{ msk_config_name }}"
         state: "present"
         kafka_versions: "{{ msk_kafka_versions }}"
@@ -93,7 +93,7 @@
           - "'zookeeper.session.timeout.ms=36000' in msk_config.server_properties"
 
     - name: update msk configuration (idempotency)
-      aws_msk_config:
+      msk_config:
         name: "{{ msk_config_name }}"
         state: "present"
         kafka_versions: "{{ msk_kafka_versions }}"
@@ -106,7 +106,7 @@
         - msk_config is not changed
 
     - name: delete msk configuration (check mode)
-      aws_msk_config:
+      msk_config:
         name: "{{ msk_config_name }}"
         state: "absent"
       check_mode: yes
@@ -118,7 +118,7 @@
         - msk_config is changed
 
     - name: delete msk configuration
-      aws_msk_config:
+      msk_config:
         name: "{{ msk_config_name }}"
         state: "absent"
       register: msk_config
@@ -129,7 +129,7 @@
        - msk_config is changed
 
     - name: delete msk configuration (idempotency)
-      aws_msk_config:
+      msk_config:
         name: "{{ msk_config_name }}"
         state: "absent"
       register: msk_config
@@ -142,7 +142,7 @@
 
   always:
     - name: remove msk configuration
-      aws_msk_config:
+      msk_config:
        name: "{{ msk_config_name }}"
        state: absent
      ignore_errors: yes
diff --git a/ansible_collections/community/aws/tests/integration/targets/networkfirewall/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/networkfirewall/tasks/main.yml
index 6a77d4f93..5a60654d8 100644
--- a/ansible_collections/community/aws/tests/integration/targets/networkfirewall/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/networkfirewall/tasks/main.yml
@@ -5,9 +5,9 @@
     - community.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
 
   block:
diff --git a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/meta/main.yml
index f09ab4af1..32cf5dda7 100644
--- a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/meta/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/meta/main.yml
@@ -1,4 +1 @@
-dependencies:
-  - role: setup_botocore_pip
-    vars:
-      botocore_version: "1.23.23"
+dependencies: []
diff --git a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/default_order.yml b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/default_order.yml
index 50df7e7ab..4c7d2ba25 100644
--- a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/default_order.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/default_order.yml
@@ -223,8 +223,6 @@
       stateful_rule_order: strict
   register: default_policy
   ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -237,8 +235,6 @@
       stateful_rule_order: strict
   register: default_policy
   ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -1143,8 +1139,6 @@
         - 'aws:drop_strict'
   register: default_policy
   ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -1158,8 +1152,6 @@
         - 'aws:drop_strict'
   register: default_policy
   ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
diff --git a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/main.yml
index d3890c680..14c3d1182 100644
--- a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/main.yml
@@ -1,10 +1,10 @@
 ---
 - module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key | default(omit) }}'
-      aws_secret_key: '{{ aws_secret_key | default(omit) }}'
-      security_token: '{{ security_token | default(omit) }}'
-      region: '{{ aws_region | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
+      region: '{{ aws_region }}'
   collections:
     - amazon.aws
     - community.aws
@@ -27,8 +27,6 @@
 
   # Tests specifically related to policies using 'strict' rule order
   - include_tasks: 'strict_order.yml'
-    vars:
-      ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
   - include_tasks: 'actions.yml'
diff --git a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/setup.yml b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/setup.yml
index 27f0ebb48..e77e4d9a9 100644
--- a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/setup.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/setup.yml
@@ -23,9 +23,6 @@
       rule_order: strict
   register: strict_groups
   loop: '{{ range(1,4,1) | list }}'
-  # Setting rule order requires botocore>=1.23.23
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - debug:
     var: default_groups
diff --git a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/strict_order.yml b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/strict_order.yml
index b842eebae..745009bf5 100644
--- a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/strict_order.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_policy/tasks/strict_order.yml
@@ -260,8 +260,6 @@
       stateful_rule_order: default
   register: strict_policy
   ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -274,8 +272,6 @@
       stateful_rule_order: default
   register: strict_policy
  ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
diff --git a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/aliases b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/aliases
index 3a0301661..ef3989f4b 100644
--- a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/aliases
+++ b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/aliases
@@ -1,4 +1,6 @@
 time=18m
 cloud/aws
+# Idempotency issues - https://github.com/ansible-collections/community.aws/issues/1634
+disabled
 
 networkfirewall_rule_group_info
diff --git a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/meta/main.yml
index f09ab4af1..32cf5dda7 100644
--- a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/meta/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/meta/main.yml
@@ -1,4 +1 @@
-dependencies:
-  - role: setup_botocore_pip
-    vars:
-      botocore_version: "1.23.23"
+dependencies: []
diff --git a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/tasks/main.yml
index a6e84426e..46823c3c8 100644
--- a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/tasks/main.yml
@@ -1,10 +1,10 @@
 ---
 - module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key | default(omit) }}'
-      aws_secret_key: '{{ aws_secret_key | default(omit) }}'
-      security_token: '{{ security_token | default(omit) }}'
-      region: '{{ aws_region | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
+      region: '{{ aws_region }}'
   collections:
     - amazon.aws
     - community.aws
@@ -22,8 +22,6 @@
 
   # List the Managed Rule Groups (there's no access to the rules themselves)
   - include_tasks: 'managed.yml'
-    vars:
-      ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
   # Minimal tests and manipulation of common metadata
   - include_tasks: 'minimal.yml'
diff --git a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/tasks/stateful.yml b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/tasks/stateful.yml
index 3b92a4cee..b6f51eff5 100644
--- a/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/tasks/stateful.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/networkfirewall_rule_group/tasks/stateful.yml
@@ -1078,8 +1078,6 @@
       rule_order: 'strict'
   register: stateful_group
   ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -1092,8 +1090,6 @@
       rule_order: 'strict'
   register: stateful_group
   ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -1142,8 +1138,6 @@
       rule_order: strict
   register: strict_group
   check_mode: true
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -1170,8 +1164,6 @@
         - 'pass tcp any any -> any any (sid:1000001;)'
       rule_order: strict
   register: strict_group
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -1208,8 +1200,6 @@
       rule_order: strict
   register: strict_group
   check_mode: true
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -1238,8 +1228,6 @@
         - 'pass tcp any any -> any any (sid:1000001;)'
       rule_order: strict
   register: strict_group
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -1269,8 +1257,6 @@
       rule_order: 'default'
   register: strict_group
   ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -1283,8 +1269,6 @@
       rule_order: 'default'
   register: strict_group
   ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -1299,8 +1283,6 @@
       rule_order: 'strict'
   register: strict_group
   ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
@@ -1313,8 +1295,6 @@
       rule_order: 'strict'
   register: strict_group
   ignore_errors: True
-  vars:
-    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
 
 - assert:
     that:
diff --git a/ansible_collections/community/aws/tests/integration/targets/opensearch/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/opensearch/meta/main.yml
index 13d6ecd91..32cf5dda7 100644
--- a/ansible_collections/community/aws/tests/integration/targets/opensearch/meta/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/opensearch/meta/main.yml
@@ -1,4 +1 @@
-dependencies:
-  - role: setup_botocore_pip
-    vars:
-      botocore_version: "1.21.38"
+dependencies: []
region: "{{ aws_region }}" route53: # Route53 is explicitly a global service region: null collections: - amazon.aws - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" block: # Get some information about who we are before starting our tests diff --git a/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_create_cert.yml b/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_create_cert.yml index 533e75e96..5492bb922 100644 --- a/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_create_cert.yml +++ b/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_create_cert.yml @@ -1,10 +1,3 @@ -- pip: - name: - # The 'cryptography' module is required by community.crypto.openssl_privatekey - - 'cryptography' - virtualenv: "{{ botocore_virtualenv }}" - virtualenv_command: "{{ botocore_virtualenv_command }}" - virtualenv_site_packages: no - name: Create temporary directory ansible.builtin.tempfile: state: directory @@ -28,7 +21,7 @@ privatekey_path: '{{ tempdir_1.path }}/rsa-private-key.pem' selfsigned_digest: sha256 - name: import certificate to ACM - aws_acm: + acm_certificate: name_tag: 'opensearch.ansible-integ-test.com' domain_name: 'opensearch.ansible-integ-test.com' certificate: "{{ lookup('file', tempdir_1.path + '/rsa-certificate.pem') }}" @@ -50,4 +43,4 @@ - name: Delete temporary directory ansible.builtin.file: state: absent - path: "{{ tempdir_1.path }}"
diff --git a/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_create_cert.yml b/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_create_cert.yml
index 533e75e96..5492bb922 100644
--- a/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_create_cert.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_create_cert.yml
@@ -1,10 +1,3 @@
-- pip:
-    name:
-      # The 'cryptography' module is required by community.crypto.openssl_privatekey
-      - 'cryptography'
-    virtualenv: "{{ botocore_virtualenv }}"
-    virtualenv_command: "{{ botocore_virtualenv_command }}"
-    virtualenv_site_packages: no
 - name: Create temporary directory
   ansible.builtin.tempfile:
     state: directory
@@ -28,7 +21,7 @@
     privatekey_path: '{{ tempdir_1.path }}/rsa-private-key.pem'
     selfsigned_digest: sha256
 - name: import certificate to ACM
-  aws_acm:
+  acm_certificate:
     name_tag: 'opensearch.ansible-integ-test.com'
     domain_name: 'opensearch.ansible-integ-test.com'
     certificate: "{{ lookup('file', tempdir_1.path + '/rsa-certificate.pem') }}"
@@ -50,4 +43,4 @@
 - name: Delete temporary directory
   ansible.builtin.file:
     state: absent
-    path: "{{ tempdir_1.path }}"
\ No newline at end of file
+    path: "{{ tempdir_1.path }}"
diff --git a/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_delete_resources.yml b/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_delete_resources.yml
index d9ddfc913..470706f15 100644
--- a/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_delete_resources.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_delete_resources.yml
@@ -43,12 +43,12 @@
     vpc_name: "{{ item.tags['Name'] }}"
 
 - name: collect info about KMS keys used for test purpose
-  aws_kms_info:
+  kms_key_info:
     filters:
       "tag:AnsibleTest": "AnsibleTestVpc"
   register: kms_info
 - name: Delete KMS keys that were created for test purpose
-  aws_kms:
+  kms_key:
     key_id: "{{ kms_arn }}"
     state: absent
   with_items: "{{ kms_info.kms_keys }}"
@@ -56,6 +56,6 @@
     kms_arn: "{{ item.key_arn }}"
 
 - name: delete certificate from ACM
-  aws_acm:
+  acm_certificate:
     name_tag: 'opensearch.ansible-integ-test.com'
     state: absent
diff --git a/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_vpc_setup.yml b/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_vpc_setup.yml
index 90aeb50bb..6e1fec1ab 100644
--- a/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_vpc_setup.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/opensearch/tasks/test_vpc_setup.yml
@@ -83,7 +83,7 @@
       AnsibleTest: AnsibleTestVpc
 
 - name: Create security group for use in testing
-  ec2_group:
+  ec2_security_group:
     name: "{{ tiny_prefix }}-sg"
     description: a security group for ansible tests
     vpc_id: "{{ testing_vpc.vpc.id }}"
@@ -120,7 +120,7 @@
 
 - name: Create KMS key for test purpose
   # The key is needed for OpenSearch encryption at rest.
-  aws_kms:
+  kms_key:
    alias: "{{ tiny_prefix }}-kms"
    description: a key used for encryption at rest in test OpenSearch cluster
    state: present
diff --git a/ansible_collections/community/aws/tests/integration/targets/redshift/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/redshift/tasks/main.yml
index f79991d4e..a50c0372e 100644
--- a/ansible_collections/community/aws/tests/integration/targets/redshift/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/redshift/tasks/main.yml
@@ -8,9 +8,9 @@
 
 - module_defaults:
     group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
 
   block:
@@ -120,7 +120,7 @@
       assert:
         that:
           - 'result.changed'
-          - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+          - result.cluster.identifier == redshift_cluster_name
           - 'result.cluster.tags.foo == "bar"'
           - 'result.cluster.tags.Tizio == "Caio"'
 
@@ -143,7 +143,7 @@
       assert:
        that:
          - 'not result.changed'
-          - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+          - result.cluster.identifier == redshift_cluster_name
          - 'result.cluster.tags.foo == "bar"'
          - 'result.cluster.tags.Tizio == "Caio"'
          - 'result.cluster.tags | count() == 2'
@@ -166,7 +166,7 @@
       assert:
        that:
          - 'result.changed'
-          - 'result.cluster.identifier == "{{ redshift_cluster_name }}-modified"'
+          - result.cluster.identifier == redshift_cluster_name ~ '-modified'
          - 'result.cluster.enhanced_vpc_routing == True'
          - 'result.cluster.tags | count() == 1'
          - 'result.cluster.tags.foo == "bar"'
@@ -234,7 +234,7 @@
       assert:
         that:
           - 'result.changed'
-          - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+          - result.cluster.identifier == redshift_cluster_name
           - 'result.cluster.db_name == "integration_test"'
 
   # ============================================================
@@ -260,7 +260,7 @@
       assert:
        that:
          - 'result.changed'
-          - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+          - result.cluster.identifier == redshift_cluster_name
          - 'result.cluster.db_name == "integration_test"'
          - 'result.cluster.tags.foo == "bar"'
 
@@ -289,7 +289,7 @@
       assert:
        that:
          - 'result.changed'
-          - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+          - result.cluster.identifier == redshift_cluster_name
          - 'result.cluster.db_name == "integration_test"'
          - 'result.cluster.tags.test1 == "value1"'
          - 'result.cluster.tags.foo == "bar"'
 
@@ -318,7 +318,7 @@
       assert:
        that:
          - 'not result.changed'
-          - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+          - result.cluster.identifier == redshift_cluster_name
          - 'result.cluster.db_name == "integration_test"'
          - 'result.cluster.tags | count() == 2'
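The redshift assertions get the analogous treatment to the msk ARN checks earlier in the patch: instead of quoting a conditional that contains a {{ ... }} template, the variable is referenced directly, with ~ used where a suffix has to be appended. Both forms, lifted from the hunks above:

# before: templated value inside a quoted conditional
- 'result.cluster.identifier == "{{ redshift_cluster_name }}-modified"'
# after: direct variable reference plus Jinja concatenation
- result.cluster.identifier == redshift_cluster_name ~ '-modified'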
diff --git a/ansible_collections/community/aws/tests/integration/targets/redshift_subnet_group/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/redshift_subnet_group/tasks/main.yml
index e15ee9b93..0df7d98d0 100644
--- a/ansible_collections/community/aws/tests/integration/targets/redshift_subnet_group/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/redshift_subnet_group/tasks/main.yml
@@ -9,9 +9,9 @@
 #
 - module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
 
   block:
diff --git a/ansible_collections/community/aws/tests/integration/targets/route53_wait/aliases b/ansible_collections/community/aws/tests/integration/targets/route53_wait/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/community/aws/tests/integration/targets/route53_wait/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/community/aws/tests/integration/targets/route53_wait/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/route53_wait/tasks/main.yml
new file mode 100644
index 000000000..f9df05f5c
--- /dev/null
+++ b/ansible_collections/community/aws/tests/integration/targets/route53_wait/tasks/main.yml
@@ -0,0 +1,245 @@
+---
+# tasks file for route53_wait integration tests
+
+- set_fact:
+    zone_one: '{{ resource_prefix | replace("-", "") }}.one.ansible.test.'
+- debug:
+    msg: Set zone {{ zone_one }}
+
+- name: Test basics (new zone, A and AAAA records)
+  module_defaults:
+    group/aws:
+      aws_access_key: '{{ aws_access_key }}'
+      aws_secret_key: '{{ aws_secret_key }}'
+      security_token: '{{ security_token | default(omit) }}'
+      region: '{{ aws_region }}'
+    amazon.aws.route53:
+      # Route53 is explicitly a global service
+      region:
+  block:
+    - name: create VPC
+      ec2_vpc_net:
+        cidr_block: 192.0.2.0/24
+        name: '{{ resource_prefix }}_vpc'
+        state: present
+      register: vpc
+
+    - name: Create a zone
+      route53_zone:
+        zone: '{{ zone_one }}'
+        comment: Created in Ansible test {{ resource_prefix }}
+        tags:
+          TestTag: '{{ resource_prefix }}.z1'
+      register: z1
+
+    - name: Create A record (check mode)
+      route53:
+        state: present
+        hosted_zone_id: '{{ z1.zone_id }}'
+        record: test.{{ zone_one }}
+        overwrite: true
+        type: A
+        value: 192.0.2.1
+        wait: false
+      register: result
+      check_mode: true
+    - assert:
+        that:
+          - result is not failed
+          - result is changed
+          - "'wait_id' in result"
+          - result.wait_id is none
+
+    - name: Wait for A record to propagate (should do nothing)
+      route53_wait:
+        result: '{{ result }}'
+
+    - name: Create A record
+      route53:
+        state: present
+        hosted_zone_id: '{{ z1.zone_id }}'
+        record: test.{{ zone_one }}
+        overwrite: true
+        type: A
+        value: 192.0.2.1
+        wait: false
+      register: result
+    - assert:
+        that:
+          - result is not failed
+          - result is changed
+          - "'wait_id' in result"
+          - result.wait_id is string
+
+    - name: Wait for A record to propagate
+      route53_wait:
+        result: '{{ result }}'
+
+    - name: Create A record (idempotent)
+      route53:
+        state: present
+        hosted_zone_id: '{{ z1.zone_id }}'
+        record: test.{{ zone_one }}
+        overwrite: true
+        type: A
+        value: 192.0.2.1
+        wait: false
+      register: result
+    - assert:
+        that:
+          - result is not failed
+          - result is not changed
+          - "'wait_id' not in result"
+
+    - name: Wait for A record to propagate (should do nothing)
+      route53_wait:
+        result: '{{ result }}'
+
+    - name: Create A records
+      route53:
+        state: present
+        hosted_zone_id: '{{ z1.zone_id }}'
+        record: '{{ item.record }}'
+        overwrite: true
+        type: A
+        value: '{{ item.value }}'
+        wait: false
+      loop:
+        - record: test-1.{{ zone_one }}
+          value: 192.0.2.1
+        - record: test-2.{{ zone_one }}
+          value: 192.0.2.2
+        - record: test-3.{{ zone_one }}
+          value: 192.0.2.3
+      register: results
+    - assert:
+        that:
+          - results is not failed
+          - results is changed
+          - results.results | length == 3
+          - results.results[0] is changed
+          - results.results[1] is changed
+          - results.results[2] is changed
+
+    - name: Wait for A records to propagate
+      route53_wait:
+        results: '{{ results }}'
+
+    - name: Create A records (idempotent)
+      route53:
+        state: present
+        hosted_zone_id: '{{ z1.zone_id }}'
+        record: '{{ item.record }}'
+        overwrite: true
+        type: A
+        value: '{{ item.value }}'
+        wait: false
+      loop:
+        - record: test-1.{{ zone_one }}
+          value: 192.0.2.1
+        - record: test-2.{{ zone_one }}
+          value: 192.0.2.2
+        - record: test-3.{{ zone_one }}
+          value: 192.0.2.3
+      register: results
+    - assert:
+        that:
+          - results is not failed
+          - results is not changed
+          - results.results | length == 3
+          - results.results[0] is not changed
+          - results.results[1] is not changed
+          - results.results[2] is not changed
+
+    - name: Wait for A records to propagate (should do nothing)
+      route53_wait:
+        results: '{{ results }}'
+
+    - name: Update some A records
+      route53:
+        state: present
+        hosted_zone_id: '{{ z1.zone_id }}'
+        record: '{{ item.record }}'
+        overwrite: true
+        type: A
+        value: '{{ item.value }}'
+        wait: false
+      loop:
+        - record: test-1.{{ zone_one }}
+          value: 192.0.2.1
+        - record: test-2.{{ zone_one }}
+          value: 192.0.2.4
+        - record: test-3.{{ zone_one }}
+          value: 192.0.2.3
+      register: results
+    - assert:
+        that:
+          - results is not failed
+          - results is changed
+          - results.results | length == 3
+          - results.results[0] is not changed
+          - results.results[1] is changed
+          - results.results[2] is not changed
+
+    - name: Wait for A records to propagate
+      route53_wait:
+        results: '{{ results }}'
+
+#Cleanup------------------------------------------------------
+
+  always:
+
+    - route53_info:
+        query: record_sets
+        hosted_zone_id: '{{ z1.zone_id }}'
+      register: z1_records
+
+    - name: Loop over A/AAAA/CNAME records and delete them
+      route53:
+        state: absent
+        zone: '{{ zone_one }}'
+        record: '{{ item.Name }}'
+        type: '{{ item.Type }}'
+        value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}'
+        weight: '{{ item.Weight | default(omit) }}'
+        identifier: '{{ item.SetIdentifier }}'
+        region: '{{ omit }}'
+      ignore_errors: true
+      loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", "CNAME", "CAA"]) | list }}'
+      when:
+        - '"ResourceRecords" in item'
+        - '"SetIdentifier" in item'
+
+    - name: Loop over A/AAAA/CNAME records and delete them
+      route53:
+        state: absent
+        zone: '{{ zone_one }}'
+        record: '{{ item.Name }}'
+        type: '{{ item.Type }}'
+        value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}'
+      ignore_errors: true
+      loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", "CNAME", "CAA"]) | list }}'
+      when:
+        - '"ResourceRecords" in item'
+
+    - name: Delete test zone one {{ zone_one }}
+      route53_zone:
+        state: absent
+        zone: '{{ zone_one }}'
+      register: delete_one
+      ignore_errors: true
+      retries: 10
+      until: delete_one is not failed
+
+    - name: destroy VPC
+      ec2_vpc_net:
+        cidr_block: 192.0.2.0/24
+        name: '{{ resource_prefix }}_vpc'
+        state: absent
+      register: remove_vpc
+      retries: 10
+      delay: 5
+      until: remove_vpc is success
+      ignore_errors: true
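The route53_wait tasks above exercise both calling conventions of the new module: a single registered route53 result is passed via result, a looped register via results. Distilled to the basic flow (zone and record values illustrative, not from the patch):

- name: change a record without blocking
  route53:
    state: present
    zone: example.test.
    record: test.example.test.
    type: A
    value: 192.0.2.1
    wait: false
  register: change

- name: block until the change has propagated
  route53_wait:
    result: "{{ change }}"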
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/defaults/main.yml
deleted file mode 100644
index 464c0a299..000000000
--- a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-name_pattern: "testbucket-ansible-integration"
-testing_buckets:
-  - "{{ tiny_prefix }}-{{ name_pattern }}-1"
-  - "{{ tiny_prefix }}-{{ name_pattern }}-2"
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/meta/main.yml
deleted file mode 100644
index 32cf5dda7..000000000
--- a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/meta/main.yml
+++ /dev/null
@@ -1 +0,0 @@
-dependencies: []
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/tasks/basic.yml b/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/tasks/basic.yml
deleted file mode 100644
index bf09665af..000000000
--- a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/tasks/basic.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-- name: Get simple S3 bucket list
-  aws_s3_bucket_info:
-  register: bucket_list
-
-- name: Assert result.changed == False and bucket list was retrieved
-  assert:
-    that:
-      - bucket_list.changed == False
-      - bucket_list.buckets
-
-- name: Get complex S3 bucket list
-  aws_s3_bucket_info:
-    name_filter: "{{ name_pattern }}"
-    bucket_facts:
-      bucket_accelerate_configuration: true
-      bucket_acl: true
-      bucket_cors: true
-      bucket_encryption: true
-      bucket_lifecycle_configuration: true
-      bucket_location: true
-      bucket_logging: true
-      bucket_notification_configuration: true
-      bucket_policy: true
-      bucket_policy_status: true
-      bucket_replication: true
-      bucket_request_payment: true
-      bucket_tagging: true
-      bucket_website: true
-      public_access_block: true
-    transform_location: true
-  register: bucket_list
-
-- name: Assert that buckets list contains requested bucket facts
-  assert:
-    that:
-      - item.name is search(name_pattern)
-      - item.bucket_accelerate_configuration is defined
-      - item.bucket_acl is defined
-      - item.bucket_cors is defined
-      - item.bucket_encryption is defined
-      - item.bucket_lifecycle_configuration is defined
-      - item.bucket_location is defined
-      - item.bucket_logging is defined
-      - item.bucket_notification_configuration is defined
-      - item.bucket_policy is defined
-      - item.bucket_policy_status is defined
-      - item.bucket_replication is defined
-      - item.bucket_request_payment is defined
-      - item.bucket_tagging is defined
-      - item.bucket_website is defined
-      - item.public_access_block is defined
-  loop: "{{ bucket_list.buckets }}"
-  loop_control:
-    label: "{{ item.name }}"
-
-- name: Assert that retrieved bucket facts contains valid data
-  assert:
-    that:
-      - item.bucket_acl.Owner is defined
-      - item.bucket_tagging.snake_case is defined
-      - item.bucket_tagging.CamelCase is defined
-      - item.bucket_tagging["lowercase spaced"] is defined
-      - item.bucket_tagging["Title Case"] is defined
-      - item.bucket_tagging.snake_case == 'simple_snake_case'
-      - item.bucket_tagging.CamelCase == 'SimpleCamelCase'
-      - item.bucket_tagging["lowercase spaced"] == 'hello cruel world'
-      - item.bucket_tagging["Title Case"] == 'Hello Cruel World'
-      - item.bucket_location.LocationConstraint == aws_region
-  loop: "{{ bucket_list.buckets }}"
-  loop_control:
-    label: "{{ item.name }}"
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml
deleted file mode 100644
index 47d24cd0e..000000000
--- a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Test community.aws.aws_s3_bucket_info
-  module_defaults:
-    group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
-      region: "{{ aws_region }}"
-  block:
-    - name: Create a simple s3_bucket
-      s3_bucket:
-        name: "{{ item }}"
-        state: present
-        tags:
-          "lowercase spaced": "hello cruel world"
-          "Title Case": "Hello Cruel World"
-          CamelCase: "SimpleCamelCase"
-          snake_case: "simple_snake_case"
-      register: output
-      loop: "{{ testing_buckets }}"
-
-    - include_tasks: basic.yml
-    - include_tasks: bucket_ownership_controls.yml
-
-  always:
-    - name: Delete simple s3_buckets
-      s3_bucket:
-        name: "{{ item }}"
-        state: absent
-      loop: "{{ testing_buckets }}"
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py b/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py
index d0d08dae9..c2b19be1d 100644
--- a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py
+++ b/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py
@@ -1,13 +1,13 @@
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 __metaclass__ = type
 
 import json
 
 
 def lambda_handler(event, context):
-    return {
-        'statusCode': 200,
-        'body': json.dumps('Hello from Lambda!')
-    }
+    return {"statusCode": 200, "body": json.dumps("Hello from Lambda!")}
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/tasks/main.yml
index ea7201065..ce81efc8c 100644
--- a/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/tasks/main.yml
@@ -4,9 +4,9 @@
     - community.general
   module_defaults:
     group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
   block:
     - include_tasks: test_sns_sqs_notifications.yml
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/aliases b/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/aliases
index 4ef4b2067..1ba8d84ef 100644
--- a/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/aliases
+++ b/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/aliases
@@ -1 +1,2 @@
+time=17m
 cloud/aws
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/meta/main.yml
index c01990664..32cf5dda7 100644
--- a/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/meta/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/meta/main.yml
@@ -1,4 +1 @@
-dependencies:
-  - role: setup_botocore_pip
-    vars:
-      botocore_version: "1.23.12"
+dependencies: []
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/tasks/main.yml
index 7a15e4b66..d9f169561 100644
--- a/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/tasks/main.yml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
     s3_lifecycle:
       wait: true
@@ -465,8 +465,6 @@
         noncurrent_version_keep_newer: 6
         prefix: /something
       register: output
-      vars:
-        ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"

    - assert:
        that:
@@ -479,8 +477,6 @@
         noncurrent_version_keep_newer: 6
         prefix: /something
       register: output
-      vars:
-        ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"

    - assert:
        that:
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_logging/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/s3_logging/tasks/main.yml
index f6c9a1710..e9a7b220b 100644
--- a/ansible_collections/community/aws/tests/integration/targets/s3_logging/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/s3_logging/tasks/main.yml
@@ -11,9 +11,9 @@
 #   -
 - module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   collections:
     - amazon.aws
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/main.yml
index ba5cce9e6..9e9f1133a 100644
--- a/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/main.yml
@@ -6,10 +6,10 @@
 #   -
 - module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key | default(omit) }}'
-      aws_secret_key: '{{ aws_secret_key | default(omit) }}'
-      security_token: '{{ security_token | default(omit) }}'
-      region: '{{ aws_region | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
+      region: '{{ aws_region }}'
   collections:
     - amazon.aws
   block:
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/s3_metrics_info.yml b/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/s3_metrics_info.yml
index cca7cad05..fdbc8cbfc 100644
--- a/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/s3_metrics_info.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/s3_metrics_info.yml
@@ -6,10 +6,10 @@
       aws s3api list-bucket-metrics-configurations --bucket {{ test_bucket }}
   environment:
-    AWS_ACCESS_KEY_ID: "{{ aws_access_key | default(omit) }}"
-    AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key | default(omit) }}"
+    AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+    AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
     AWS_SESSION_TOKEN: "{{ security_token | default(omit) }}"
-    AWS_DEFAULT_REGION: "{{ aws_region | default(omit) }}"
+    AWS_DEFAULT_REGION: "{{ aws_region }}"
   register: list_comand_result

 - set_fact:
diff --git a/ansible_collections/community/aws/tests/integration/targets/s3_sync/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/s3_sync/tasks/main.yml
index 08496cd74..600490706 100644
--- a/ansible_collections/community/aws/tests/integration/targets/s3_sync/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/s3_sync/tasks/main.yml
@@ -5,9 +5,9 @@
     - community.general
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:
   # ============================================================
@@ -23,7 +23,7 @@
   - assert:
       that:
         - output.changed
-        - output.name == "{{ test_bucket }}"
+        - output.name == test_bucket
         - not output.requester_pays
   # ============================================================
   - name: Prepare fixtures folder
@@ -67,7 +67,7 @@
   - assert:
       that:
         - output.changed
-        - output.name == "{{ test_bucket_2 }}"
+        - output.name == test_bucket_2
         - not output.requester_pays

   - name: Sync files with remote bucket using glacier storage class
@@ -113,7 +113,7 @@
   - assert:
       that:
         - output.changed
-        - output.name == "{{ test_bucket_3 }}"
+        - output.name == test_bucket_3
         - not output.requester_pays

   - name: Sync individual file with remote bucket
@@ -158,14 +158,14 @@
   - name: Empty all buckets before deleting
     block:
     - name: list test_bucket objects
-      aws_s3:
+      s3_object:
        bucket: "{{ test_bucket }}"
        mode: list
      register: objects
      ignore_errors: true

    - name: remove objects from test_bucket
-      aws_s3:
+      s3_object:
        bucket: "{{ test_bucket }}"
        mode: delobj
        object: "{{ obj }}"
@@ -175,14 +175,14 @@
      ignore_errors: true

    - name: list test_bucket_2 objects
-      aws_s3:
+      s3_object:
        bucket: "{{ test_bucket_2 }}"
        mode: list
      register: objects
      ignore_errors: true

    - name: remove objects from test_bucket_2
-      aws_s3:
+      s3_object:
        bucket: "{{ test_bucket_2 }}"
        mode: delobj
        object: "{{ obj }}"
@@ -192,14 +192,14 @@
      ignore_errors: true

    - name: list test_bucket_3 objects
-      aws_s3:
+      s3_object:
        bucket: "{{ test_bucket_3 }}"
        mode: list
      register: objects
      ignore_errors: true

    - name: remove objects from test_bucket_3
-      aws_s3:
+      s3_object:
        bucket: "{{ test_bucket_3 }}"
        mode: delobj
        object: "{{ obj }}"
diff --git a/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/aliases b/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/aliases
index 4ef4b2067..e5729917b 100644
--- a/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/aliases
+++ b/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/aliases
@@ -1 +1,2 @@
+time=37m
 cloud/aws
diff --git a/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/basic.yml b/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/basic.yml
index 5d1fb071e..ea285ee05 100644
--- a/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/basic.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/basic.yml
@@ -23,9 +23,9 @@
     # As a lookup plugin we won't have access to module_defaults
     connection_args:
       region: "{{ aws_region }}"
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      aws_security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
   no_log: True

 - vars:
@@ -73,7 +73,7 @@
   # Creation testing
   # ============================================================
   - name: add secret to AWS Secrets Manager
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       state: present
       secret_type: 'string'
@@ -100,7 +100,7 @@
       secret_arn: '{{ result.secret.arn }}'

   - name: no changes to secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       state: present
       secret_type: 'string'
@@ -122,7 +122,7 @@
       - result.secret.version_ids_to_stages | length == 1

   - name: Set secret description
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -151,7 +151,7 @@
   ###############################################################

   - name: Set tags (CHECK_MODE)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -167,7 +167,7 @@
       - result is changed

   - name: Set tags
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -197,7 +197,7 @@
       - result.secret.version_ids_to_stages | length == 2

   - name: Set tags - idempotency (CHECK_MODE)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -213,7 +213,7 @@
       - result is not changed

   - name: Set tags - idempotency
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -245,7 +245,7 @@
   ###

   - name: Update tags with purge (CHECK_MODE)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -261,7 +261,7 @@
       - result is changed

   - name: Update tags with purge
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -291,7 +291,7 @@
       - result.secret.version_ids_to_stages | length == 2

   - name: Update tags with purge - idempotency (CHECK_MODE)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -307,7 +307,7 @@
       - result is not changed

   - name: Update tags with purge - idempotency
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -339,7 +339,7 @@
   ###

   - name: Update tags without purge (CHECK_MODE)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -356,7 +356,7 @@
       - result is changed

   - name: Update tags without purge
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -387,7 +387,7 @@
       - result.secret.version_ids_to_stages | length == 2

   - name: Update tags without purge - idempotency (CHECK_MODE)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -404,7 +404,7 @@
       - result is not changed

   - name: Update tags without purge - idempotency
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -437,7 +437,7 @@
   ###

   - name: Tags not set - idempotency (CHECK_MODE)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -452,7 +452,7 @@
       - result is not changed

   - name: Tags not set - idempotency
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -483,7 +483,7 @@
   ###

   - name: remove all tags from secret (CHECK_MODE)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -499,7 +499,7 @@
       - result is changed

   - name: remove all tags from secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -529,7 +529,7 @@
       - result.secret.version_ids_to_stages | length == 2

   - name: remove all tags from secret - idempotency (CHECK_MODE)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -545,7 +545,7 @@
       - result is not changed

   - name: remove all tags from secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -579,7 +579,7 @@
   ###############################################################

   - name: add resource policy to secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -594,7 +594,7 @@
       - result.changed

   - name: remove existing resource policy from secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -608,7 +608,7 @@
       - result.changed

   - name: remove resource policy from secret (idempotency)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -626,7 +626,7 @@
   # ============================================================

   - name: Update secret with JSON (CHECK_MODE)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -642,7 +642,7 @@
       - result.changed

   - name: Update secret with JSON
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       state: present
       description: 'this is a change to this secret'
@@ -657,7 +657,7 @@
       - result.changed

   - name: Update secret with JSON - idempotency (CHECK_MODE)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -673,7 +673,7 @@
       - result is not changed

   - name: Update secret with JSON - idempotency
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to this secret'
       state: present
@@ -693,7 +693,7 @@
   # ============================================================

   - name: Create secret with overwrite = False (Check mode)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}-2"
       state: present
       secret_type: 'string'
@@ -708,7 +708,7 @@
       - result is changed

   - name: Create secret with overwrite = False
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}-2"
       state: present
       secret_type: 'string'
@@ -722,7 +722,7 @@
       - result is changed

   - name: Update secret with overwrite = False (Check mode)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}-2"
       state: present
       secret_type: 'string'
@@ -737,7 +737,7 @@
       - result is not changed

   - name: Create secret with overwrite = False
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}-2"
       state: present
       secret_type: 'string'
@@ -755,7 +755,7 @@
   # ============================================================

   - name: remove secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       state: absent
       recovery_window: 7
@@ -767,7 +767,7 @@
       - result.changed

   - name: remove secret (idempotency)
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       state: absent
       recovery_window: 7
@@ -779,7 +779,7 @@
       - not result.changed

   - name: immediate secret removal
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       state: absent
       recovery_window: 0
@@ -793,7 +793,7 @@
   # AWS Doesn't expose when the secret will be removed, all we can do is
   # check that we didn't throw an error
   - name: immediate secret removal
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       state: absent
       recovery_window: 0
@@ -806,14 +806,14 @@
   always:
     - name: remove secret
-      aws_secret:
+      secretsmanager_secret:
        name: "{{ secret_name }}"
        state: absent
        recovery_window: 0
      ignore_errors: yes

    - name: remove secret 2
-      aws_secret:
+      secretsmanager_secret:
        name: "{{ secret_name }}-2"
        state: absent
        recovery_window: 0
diff --git a/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/main.yaml b/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/main.yaml
index 41fbedd9d..9011071f8 100644
--- a/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/main.yaml
+++ b/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/main.yaml
@@ -1,9 +1,9 @@
 ---
 - module_defaults:
     group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
   collections:
     - amazon.aws
diff --git a/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/replication.yml b/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/replication.yml
index 30d3a9484..30f178c06 100644
--- a/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/replication.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/replication.yml
@@ -4,7 +4,7 @@
   # Creation/Deletion testing
   # ============================================================
   - name: add secret to AWS Secrets Manager
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       state: present
       secret_type: 'string'
@@ -28,7 +28,7 @@
       - result.version_ids_to_stages is not none

   - name: no changes to secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       state: present
       secret: "{{ super_secret_string }}"
@@ -45,7 +45,7 @@
       - result.arn is not none

   - name: remove region replica
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change to remove replication'
       secret: "{{ super_secret_string }}"
@@ -60,7 +60,7 @@
       - '"replication_status" not in result.secret'

   - name: add region replica to an existing secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       description: 'this is a change add replication'
       secret: "{{ super_secret_string }}"
@@ -80,7 +80,7 @@
       - result.secret.replication_status[1]["kms_key_id"] == 'alias/aws/secretsmanager'

   - name: change replica regions
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}"
       state: present
       secret: "{{ super_secret_string }}"
@@ -100,7 +100,7 @@
   always:
     - name: remove region replica
-      aws_secret:
+      secretsmanager_secret:
        name: "{{ secret_name }}"
        description: 'this is a change to remove replication'
        state: present
@@ -109,7 +109,7 @@
      ignore_errors: yes

    - name: remove secret
-      aws_secret:
+      secretsmanager_secret:
        name: "{{ secret_name }}"
        state: absent
        recovery_window: 0
diff --git a/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/rotation.yml b/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/rotation.yml
index 5a1d146e5..697c5ecc2 100644
--- a/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/rotation.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/secretsmanager_secret/tasks/rotation.yml
@@ -1,9 +1,9 @@
 ---
 - module_defaults:
     group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
   collections:
     - amazon.aws
@@ -70,7 +70,7 @@
   # Creation/Deletion testing
   # ============================================================
   - name: add secret to AWS Secrets Manager
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}-rotate"
       state: present
       secret_type: 'string'
@@ -95,7 +95,7 @@
       principal: "secretsmanager.amazonaws.com"

   - name: add rotation lambda to secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}-rotate"
       description: 'this is a change to this secret'
       state: present
@@ -113,7 +113,7 @@
       - result.changed

   - name: remove rotation lambda from secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}-rotate"
       description: 'this is a change to this secret'
       state: present
@@ -127,7 +127,7 @@
       - result.changed

   - name: remove rotation lambda from secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}-rotate"
       description: 'this is a change to this secret'
       state: present
@@ -141,7 +141,7 @@
       - not result.changed

   - name: remove secret
-    aws_secret:
+    secretsmanager_secret:
       name: "{{ secret_name }}-rotate"
       state: absent
       recovery_window: 0
@@ -149,7 +149,7 @@
   always:
     - name: remove secret
-      aws_secret:
+      secretsmanager_secret:
        name: "{{ secret_name }}-rotate"
        state: absent
        recovery_window: 0
diff --git a/ansible_collections/community/aws/tests/integration/targets/ses_identity/tasks/assert_defaults.yaml b/ansible_collections/community/aws/tests/integration/targets/ses_identity/tasks/assert_defaults.yaml
index 0f74d2f05..266822633 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ses_identity/tasks/assert_defaults.yaml
+++ b/ansible_collections/community/aws/tests/integration/targets/ses_identity/tasks/assert_defaults.yaml
@@ -5,8 +5,8 @@
 - name: assert returned identity_arn
   assert:
     that:
-      - "result.identity_arn|regex_search('^arn:aws:ses:' + ec2_region + ':[0-9]*:identity/' + identity + '$')"
-    msg: "'{{ result.identity_arn}}' doesn't match regex '^arn:aws:ses:{{ ec2_region }}:[0-9]*:identity/{{ identity }}'"
+      - "result.identity_arn|regex_search('^arn:aws:ses:' + aws_region + ':[0-9]*:identity/' + identity + '$')"
+    msg: "'{{ result.identity_arn}}' doesn't match regex '^arn:aws:ses:{{ aws_region }}:[0-9]*:identity/{{ identity }}'"
 - name: assert verification_attributes.verification_status == 'Pending'
   assert:
     that:
diff --git a/ansible_collections/community/aws/tests/integration/targets/ses_identity/tasks/main.yaml b/ansible_collections/community/aws/tests/integration/targets/ses_identity/tasks/main.yaml
index 81ab3c4a7..3ecb68c38 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ses_identity/tasks/main.yaml
+++ b/ansible_collections/community/aws/tests/integration/targets/ses_identity/tasks/main.yaml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:

@@ -14,7 +14,7 @@
  - name: test register email identity
    block:
    - name: register email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
      register: result
@@ -27,14 +27,14 @@
        identity: "{{ email_identity }}"
    always:
    - name: cleanup email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
  # ============================================================
  - name: test register domain identity
    block:
    - name: register domain identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: result
@@ -51,18 +51,18 @@
      - result.verification_attributes.verification_token
    always:
    - name: cleanup domain identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
  # ============================================================
  - name: test email_identity unchanged when already existing
    block:
    - name: register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present

    - name: duplicate register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
      register: result
@@ -75,18 +75,18 @@
      identity: "{{ email_identity }}"
    always:
    - name: cleanup identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
  # ============================================================
  - name: test domain_identity unchanged when already existing
    block:
    - name: register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present

    - name: duplicate register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: result
@@ -99,7 +99,7 @@
      identity: "{{ domain_identity }}"
    always:
    - name: cleanup identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
  # ============================================================
@@ -110,7 +110,7 @@
  - name: test register identity without explicit region
    block:
    - name: register email identity without explicit region
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
        region: "{{ omit }}"
@@ -126,35 +126,35 @@
      identity: "{{ email_identity }}"
    always:
    - name: cleanup email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
  # ============================================================
  - name: test register email identity check mode
    block:
    - name: register email identity check mode
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
      register: result
      check_mode: True
-
+
    - name: assert changed is True
      assert:
        that:
          - result.changed == True
-
+
    - import_tasks: assert_defaults.yaml
      vars:
        identity: "{{ email_identity }}"
-
+
    always:
    - name: cleanup email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
      register: result
-
+
    - name: assert nothing to clean up since check mode
      assert:
        that:
@@ -163,35 +163,35 @@
  - name: test register domain identity check mode
    block:
    - name: register domain identity check mode
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: result
      check_mode: True
-
+
    - name: assert changed is True
      assert:
        that:
          - result.changed == True
-
+
    - import_tasks: assert_defaults.yaml
      vars:
        identity: "{{ domain_identity }}"
-
+
    always:
    - name: cleanup domain identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
      register: result
-
+
    - name: assert nothing to clean up since check mode
      assert:
        that:
          - result.changed == False
  # ============================================================
  - name: remove non-existent email identity
-    aws_ses_identity:
+    ses_identity:
      identity: "{{ email_identity }}"
      state: absent
    register: result
@@ -201,7 +201,7 @@
      - result.changed == False
  # ============================================================
  - name: remove non-existent domain identity
-    aws_ses_identity:
+    ses_identity:
      identity: "{{ domain_identity }}"
      state: absent
    register: result
@@ -213,29 +213,29 @@
  - name: test remove email identity check mode
    block:
    - name: register email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
      register: result
-
+
    - name: remove email identity check mode
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
      register: result
      check_mode: True
-
+
    - name: assert changed is True
      assert:
        that:
          - result.changed == True
    always:
    - name: cleanup email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
      register: result
-
+
    - name: assert something to clean up since remove was check mode
      assert:
        that:
@@ -244,29 +244,29 @@
  - name: test remove domain identity check mode
    block:
    - name: register domain identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: result
-
+
    - name: remove domain identity check mode
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
      register: result
      check_mode: True
-
+
    - name: assert changed is True
      assert:
        that:
          - result.changed == True
    always:
    - name: cleanup domain identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
      register: result
-
+
    - name: assert something to clean up since remove was check mode
      assert:
        that:
@@ -284,7 +284,7 @@
      - complaint
      - delivery
    - name: register email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
        bounce_notifications:
@@ -316,7 +316,7 @@
      - complaint
      - delivery
    - name: cleanup email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
  # ============================================================
@@ -332,11 +332,11 @@
      - complaint
      - delivery
    - name: register email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
    - name: set notification topics
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
        bounce_notifications:
@@ -366,7 +366,67 @@
      - complaint
      - delivery
    - name: cleanup email identity
-      aws_ses_identity:
+      ses_identity:
+        identity: "{{ email_identity }}"
+        state: absent
+  # ============================================================
+  - name: test clear notification configuration
+    block:
+    - name: test topic
+      sns_topic:
+        name: "{{ notification_queue_name }}-{{ item }}"
+        state: present
+      register: topic_info
+      with_items:
+        - bounce
+        - complaint
+        - delivery
+    - name: register email identity
+      ses_identity:
+        identity: "{{ email_identity }}"
+        state: present
+        bounce_notifications:
+          topic: "{{ topic_info.results[0].sns_arn }}"
+        complaint_notifications:
+          topic: "{{ topic_info.results[1].sns_arn }}"
+        delivery_notifications:
+          topic: "{{ topic_info.results[2].sns_arn }}"
+    - name: Make no change to identity
+      ses_identity:
+        identity: "{{ email_identity }}"
+        state: present
+      register: result
+    - name: assert no change
+      assert:
+        that:
+          - result.changed == False
+
+    - name: clear notification settings
+      ses_identity:
+        identity: "{{ email_identity }}"
+        state: present
+        bounce_notifications: {}
+        complaint_notifications: {}
+        delivery_notifications: {}
+      register: result
+    - name: assert notification settings
+      assert:
+        that:
+          - result.changed == True
+          - "'bounce_topic' not in result.notification_attributes"
+          - "'delivery_topic' not in result.notification_attributes"
+          - "'complaint_topic' not in result.notification_attributes"
+    always:
+    - name: cleanup topics
+      sns_topic:
+        name: "{{ notification_queue_name }}-{{ item }}"
+        state: absent
+      with_items:
+        - bounce
+        - complaint
+        - delivery
+    - name: cleanup email identity
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
  # ============================================================
@@ -381,14 +441,14 @@
      - bounce
      - complaint
      - delivery
-
+
    - name: register email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
-
+
    - name: set notification settings check mode
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
        bounce_notifications:
@@ -403,12 +463,12 @@
        feedback_forwarding: No
      register: result
      check_mode: True
-
+
    - name: assert changed is True
      assert:
        that:
          - result.changed == True
-
+
    - name: assert notification settings
      assert:
        that:
@@ -419,13 +479,13 @@
          - result.notification_attributes.complaint_topic == topic_info.results[1].sns_arn
          - result.notification_attributes.headers_in_complaint_notifications_enabled == True
          - result.notification_attributes.forwarding_enabled == False
-
+
    - name: re-register base email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
      register: result
-
+
    - name: assert no change since notifications were check mode
      assert:
        that:
@@ -437,7 +497,6 @@
          - "'complaint_topic' not in result.notification_attributes"
          - result.notification_attributes.headers_in_complaint_notifications_enabled == False
          - result.notification_attributes.forwarding_enabled == True
-
    always:
    - name: cleanup topics
      sns_topic:
@@ -447,16 +506,16 @@
      - bounce
      - complaint
      - delivery
-
+
    - name: cleanup email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
  # ============================================================
  - name: test include headers on notification queues
    block:
    - name: register email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
        bounce_notifications:
@@ -474,7 +533,7 @@
          - result.notification_attributes.headers_in_delivery_notifications_enabled == True
    always:
    - name: cleanup email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
  # ============================================================
@@ -489,7 +548,7 @@
      - bounce
      - complaint
    - name: register email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
        bounce_notifications:
@@ -511,14 +570,14 @@
      - bounce
      - complaint
    - name: cleanup email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
  # ============================================================
  - name: test disable feedback forwarding fails if no topics
    block:
    - name: register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
        feedback_forwarding: No
@@ -530,7 +589,7 @@
          - '"Invalid Parameter Value" in result.msg'
    always:
    - name: cleanup identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
  # ============================================================
@@ -542,7 +601,7 @@
        state: present
      register: topic_info
    - name: register email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
        bounce_notifications:
@@ -560,7 +619,7 @@
        name: "{{ notification_queue_name }}-bounce"
        state: absent
    - name: cleanup identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
  # ============================================================
@@ -572,7 +631,7 @@
        state: present
      register: topic_info
    - name: register email identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: present
        complaint_notifications:
@@ -590,6 +649,6 @@
        name: "{{ notification_queue_name }}-complaint"
        state: absent
    - name: cleanup identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ email_identity }}"
        state: absent
diff --git a/ansible_collections/community/aws/tests/integration/targets/ses_identity_policy/tasks/main.yaml b/ansible_collections/community/aws/tests/integration/targets/ses_identity_policy/tasks/main.yaml
index 5aa3d867b..8fe290b56 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ses_identity_policy/tasks/main.yaml
+++ b/ansible_collections/community/aws/tests/integration/targets/ses_identity_policy/tasks/main.yaml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:

@@ -14,13 +14,13 @@
  - name: test add identity policy
    block:
    - name: register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: identity_info

    - name: register identity policy
-      aws_ses_identity_policy:
+      ses_identity_policy:
        identity: "{{ domain_identity }}"
        policy_name: "{{ policy_name }}"
        policy: "{{ lookup('template', 'policy.json.j2') }}"
@@ -40,27 +40,27 @@

    always:
    - name: clean-up identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
  # ============================================================
  - name: test add duplicate identity policy
    block:
    - name: register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: identity_info

    - name: register identity policy
-      aws_ses_identity_policy:
+      ses_identity_policy:
        identity: "{{ domain_identity }}"
        policy_name: "{{ policy_name }}"
        policy: "{{ lookup('template', 'policy.json.j2') }}"
        state: present

    - name: register duplicate identity policy
-      aws_ses_identity_policy:
+      ses_identity_policy:
        identity: "{{ domain_identity }}"
        policy_name: "{{ policy_name }}"
        policy: "{{ lookup('template', 'policy.json.j2') }}"
        state: present
      register: result
@@ -80,20 +80,20 @@

    always:
    - name: clean-up identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
  # ============================================================
  - name: test add identity policy by identity arn
    block:
    - name: register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: identity_info

    - name: register identity policy
-      aws_ses_identity_policy:
+      ses_identity_policy:
        identity: "{{ identity_info.identity_arn }}"
        policy_name: "{{ policy_name }}"
        policy: "{{ lookup('template', 'policy.json.j2') }}"
@@ -113,20 +113,20 @@

    always:
    - name: clean-up identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
  # ============================================================
  - name: test add multiple identity policies
    block:
    - name: register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: identity_info

    - name: register identity policy
-      aws_ses_identity_policy:
+      ses_identity_policy:
        identity: "{{ domain_identity }}"
        policy_name: "{{ policy_name }}-{{ item }}"
        policy: "{{ lookup('template', 'policy.json.j2') }}"
@@ -145,20 +145,20 @@

    always:
    - name: clean-up identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
  # ============================================================
  - name: test add inline identity policy
    block:
    - name: register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: identity_info

    - name: register identity policy
-      aws_ses_identity_policy:
+      ses_identity_policy:
        identity: "{{ domain_identity }}"
        policy_name: "{{ policy_name }}"
        policy:
@@ -185,7 +185,7 @@
          - result.policies|select('equalto', policy_name)|list|length == 1

    - name: register duplicate identity policy
-      aws_ses_identity_policy:
+      ses_identity_policy:
        identity: "{{ domain_identity }}"
        policy_name: "{{ policy_name }}"
        policy:
@@ -207,27 +207,27 @@

    always:
    - name: clean-up identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
  # ============================================================
  - name: test remove identity policy
    block:
    - name: register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: identity_info

    - name: register identity policy
-      aws_ses_identity_policy:
+      ses_identity_policy:
        identity: "{{ domain_identity }}"
        policy_name: "{{ policy_name }}"
        policy: "{{ lookup('template', 'policy.json.j2') }}"
        state: present

    - name: delete identity policy
-      aws_ses_identity_policy:
+      ses_identity_policy:
        identity: "{{ domain_identity }}"
        policy_name: "{{ policy_name }}"
        state: absent
@@ -245,20 +245,20 @@

    always:
    - name: clean-up identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
  # ============================================================
  - name: test remove missing identity policy
    block:
    - name: register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: identity_info

    - name: delete identity policy
-      aws_ses_identity_policy:
+      ses_identity_policy:
        identity: "{{ domain_identity }}"
        policy_name: "{{ policy_name }}"
        state: absent
@@ -276,20 +276,20 @@

    always:
    - name: clean-up identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
  # ============================================================
  - name: test add identity policy with invalid policy
    block:
    - name: register identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: present
      register: identity_info

    - name: register identity policy
-      aws_ses_identity_policy:
+      ses_identity_policy:
        identity: "{{ domain_identity }}"
        policy_name: "{{ policy_name }}"
        policy: '{"noSuchAttribute": 2}'
@@ -304,6 +304,6 @@

    always:
    - name: clean-up identity
-      aws_ses_identity:
+      ses_identity:
        identity: "{{ domain_identity }}"
        state: absent
diff --git a/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/active-rule-set-tests.yaml b/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/active-rule-set-tests.yaml
index ea79dbbcc..d83cd2f85 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/active-rule-set-tests.yaml
+++ b/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/active-rule-set-tests.yaml
@@ -10,10 +10,10 @@
  - name: mark rule set active
    block:
    - name: create rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
    - name: mark rule set active
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
      register: result
@@ -23,7 +23,7 @@
        - result.changed == True
        - result.active == True
    - name: remark rule set active
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
      register: result
@@ -33,7 +33,7 @@
        - result.changed == False
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -42,7 +42,7 @@
  - name: create rule set active
    block:
    - name: create rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
      register: result
@@ -53,7 +53,7 @@
        - result.active == True
        - "default_rule_set in result.rule_sets|map(attribute='name')"
    - name: remark rule set active
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
      register: result
@@ -63,7 +63,7 @@
        - result.changed == False
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -72,11 +72,11 @@
  - name: mark rule set inactive
    block:
    - name: create active rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
    - name: mark rule set inactive
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: False
      register: result
@@ -86,7 +86,7 @@
        - result.changed == True
        - result.active == False
    - name: remark rule set inactive
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: False
      register: result
@@ -96,7 +96,7 @@
        - result.changed == False
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -105,11 +105,11 @@
  - name: Absent active flag does not change active status
    block:
    - name: create active rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
    - name: recreate rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
      register: result
    - name: assert not changed and still active
@@ -119,7 +119,7 @@
        - result.active == True
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -128,11 +128,11 @@
  - name: Cannot Remove Active Rule Set
    block:
    - name: create active rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
    - name: remove rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
      register: result
@@ -143,7 +143,7 @@
        - "result.error.code == 'CannotDelete'"
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -152,11 +152,11 @@
  - name: Remove Active Rule Set with Force
    block:
    - name: create active rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
    - name: force remove rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -168,7 +168,7 @@
        - "default_rule_set not in result.rule_sets|map(attribute='name')"
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -177,15 +177,15 @@
  - name: Force Remove of Inactive Rule Set does Not Affect Active Rule Set
    block:
    - name: create active rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
    - name: create inactive rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ second_rule_set }}"
        active: False
    - name: force remove inactiave rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ second_rule_set }}"
        state: absent
        force: True
@@ -196,7 +196,7 @@
        - result.changed == True
        - "second_rule_set not in result.rule_sets|map(attribute='name')"
    - name: remark active rule set active
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
      register: result
@@ -206,7 +206,7 @@
        - result.changed == False
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ item }}"
        state: absent
        force: True
@@ -218,11 +218,11 @@
  - name: mark rule set inactive in check mode
    block:
    - name: create rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
    - name: mark rule set inactive in check mode
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: False
      register: result
@@ -233,7 +233,7 @@
        - result.changed == True
        - result.active == False
    - name: remark rule set inactive
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: False
      register: result
@@ -243,7 +243,7 @@
        - result.changed == True
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -252,11 +252,11 @@
  - name: Cannot Remove Active Rule Set in check mode
    block:
    - name: create active rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
    - name: remove rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
      register: result
@@ -268,7 +268,7 @@
        - "result.error.code == 'CannotDelete'"
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -277,11 +277,11 @@
  - name: Remove Active Rule Set with Force in check mode
    block:
    - name: create active rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
    - name: force remove rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -294,7 +294,7 @@
        - "default_rule_set not in result.rule_sets|map(attribute='name')"
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
diff --git a/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/cleanup-lock.yaml b/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/cleanup-lock.yaml
index 155bf472e..941e0148a 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/cleanup-lock.yaml
+++ b/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/cleanup-lock.yaml
@@ -9,7 +9,7 @@
 - cloudwatchlogs_log_group:
     log_group_name: "{{ lock_attempt_log_group_name }}"
     state: absent
-    aws_access_key: "{{ aws_access_key }}"
-    aws_secret_key: "{{ aws_secret_key }}"
-    security_token: "{{ security_token }}"
+    access_key: "{{ aws_access_key }}"
+    secret_key: "{{ aws_secret_key }}"
+    session_token: "{{ security_token | default(omit) }}"
     region: "{{ aws_region }}"
diff --git a/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/inactive-rule-set-tests.yaml b/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/inactive-rule-set-tests.yaml
index 845168c23..92321b3eb 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/inactive-rule-set-tests.yaml
+++ b/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/inactive-rule-set-tests.yaml
@@ -14,7 +14,7 @@
  - name: test create rule sets
    block:
    - name: create rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
      register: result
    - name: assert changed to exists inactive
@@ -24,7 +24,7 @@
        - result.active == False
        - "default_rule_set in result.rule_sets|map(attribute='name')"
    - name: recreate rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
      register: result
    - name: assert changed is False
@@ -33,7 +33,7 @@
        - result.changed == False
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -41,7 +41,7 @@
  - name: Remove No Such Rules Set
    block:
    - name: remove ruleset
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
      register: result
@@ -54,10 +54,10 @@
  - name: Remove Inactive Rule Set
    block:
    - name: create rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
    - name: remove rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
      register: result
@@ -68,7 +68,7 @@
        - "default_rule_set not in result.rule_sets|map(attribute='name')"
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -76,7 +76,7 @@
  - name: test create in check mode
    block:
    - name: create rule set in check mode
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
      register: result
      check_mode: True
@@ -88,7 +88,7 @@
        - "default_rule_set in result.rule_sets|map(attribute='name')"
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -101,10 +101,10 @@
  - name: mark rule set active in check mode
    block:
    - name: create rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
    - name: mark rule set active in check mode
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: True
      register: result
@@ -118,7 +118,7 @@
    # it active again as that way this test can be run in
    # parallel
    - name: Ensure rule set is inactive
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        active: False
      register: result
@@ -128,7 +128,7 @@
        - result.changed == False
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
@@ -136,10 +136,10 @@
  - name: Remove Inactive Rule Set in check mode
    block:
    - name: create rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
    - name: remove rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
      register: result
@@ -151,7 +151,7 @@
        - "default_rule_set not in result.rule_sets|map(attribute='name')"
    always:
    - name: cleanup rule set
-      aws_ses_rule_set:
+      ses_rule_set:
        name: "{{ default_rule_set }}"
        state: absent
        force: True
diff --git a/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/main.yaml b/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/main.yaml
index 4902b5c60..99938b774 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/main.yaml
+++ b/ansible_collections/community/aws/tests/integration/targets/ses_rule_set/tasks/main.yaml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:
diff --git a/ansible_collections/community/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml
index 16ad00270..9745064c9 100644
--- a/ansible_collections/community/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml
@@ -1,2 +1,2 @@
-default_botocore_version: '1.21.0'
-default_boto3_version: '1.18.0'
+default_botocore_version: "{{ lookup('amazon.aws.aws_collection_constants', 'MINIMUM_BOTOCORE_VERSION') }}"
+default_boto3_version: "{{ lookup('amazon.aws.aws_collection_constants', 'MINIMUM_BOTO3_VERSION') }}"
diff --git a/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml
index ec7cf0ec6..f7ac20eee 100644
--- a/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml
@@ -4,7 +4,15 @@
 instance_type: t3.micro
 ami_details:
   fedora:
     owner: 125523088429
-    name: Fedora-Cloud-Base-34-1.2.x86_64*
+    name: 'Fedora-Cloud-Base-41-1.2.x86_64*'
+    user_data: |
+      #!/bin/sh
+      sudo dnf install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+      sudo systemctl start amazon-ssm-agent
+    os_type: linux
+  centos:
+    owner: 125523088429
+    name: 'CentOS Stream 9 x86_64*'
     user_data: |
       #!/bin/sh
       sudo dnf install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
@@ -25,6 +33,8 @@ ami_details:
 #    name: ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server*
     user_data: |
       #!/bin/sh
+      apt-get update
+      apt-get --yes install acl
       # Pre-Installed just needs started
       sudo systemctl start amazon-ssm-agent
     os_type: linux
diff --git a/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/cleanup.yml b/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/cleanup.yml
index 6171e5eb6..fce828a3c 100644
--- a/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/cleanup.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/cleanup.yml
@@ -4,9 +4,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:

@@ -68,11 +68,12 @@
     iam_role:
       name: "{{ iam_role_name }}"
       state: absent
+      delete_instance_profile: True
     ignore_errors: yes
     when: iam_role_vars_file.stat.exists == true

   - name: Delete the KMS key
-    aws_kms:
+    kms_key:
       state: absent
       alias: '{{ kms_key_name }}'
diff --git a/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/connection_args.yml b/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/connection_args.yml
index 727220e49..8d5c4b714 100644
--- a/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/connection_args.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/connection_args.yml
@@ -3,9 +3,9 @@
   # As a lookup plugin we don't have access to module_defaults
   connection_args:
     region: "{{ aws_region }}"
-    aws_access_key: "{{ aws_access_key }}"
-    aws_secret_key: "{{ aws_secret_key }}"
-    aws_security_token: "{{ security_token | default(omit) }}"
+    access_key: "{{ aws_access_key }}"
+    secret_key: "{{ aws_secret_key }}"
+    session_token: "{{ security_token | default(omit) }}"
   connection_env:
     AWS_DEFAULT_REGION: "{{ aws_region }}"
     AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
diff --git a/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/encryption.yml b/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/encryption.yml
index 949892d18..1379b0428 100644
--- a/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/encryption.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/encryption.yml
@@ -1,7 +1,7 @@
 ---
 ## Task file for setup/teardown AWS resources for aws_ssm integration testing
 - name: create a KMS key
-  aws_kms:
+  kms_key:
     alias: '{{ kms_key_name }}'
     grants:
       - name: SSM-Agent-Access
diff --git a/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/main.yml
index 830bd5fcc..6c29c4154 100644
--- a/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/setup_connection_aws_ssm/tasks/main.yml
@@ -5,9 +5,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   block:
diff --git a/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml
index 6fbe55e83..11a1e561e 100644
--- a/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml
@@ -1,6 +1,7 @@
+---
 # CentOS Community Platform Engineering (CPE)
-ec2_ami_owner_id: '125523088429'
-#ec2_ami_name: 'Fedora-Cloud-Base-*.x86_64*'
-ec2_ami_name: 'CentOS Stream 9 x86_64*'
-#ec2_ami_ssh_user: 'fedora'
-ec2_ami_ssh_user: 'centos'
+ec2_ami_owner_id: "125523088429"
+# ec2_ami_name: 'Fedora-Cloud-Base-*.x86_64*'
+ec2_ami_name: CentOS Stream 9 x86_64*
+# ec2_ami_ssh_user: 'fedora'
+ec2_ami_ssh_user: centos
diff --git a/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml
index 32cf5dda7..23d65c7ef 100644
--- a/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml
@@ -1 +1,2 @@
+---
 dependencies: []
diff --git a/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml
index f41791073..bd059c866 100644
--- a/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml
@@ -8,46 +8,47 @@
 # rather than hardcoding the IDs so we're not limited to specific Regions
 # - ec2_ami_id
 #
-- module_defaults:
+- name: Setup common EC2 related facts.
+  module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
-      region: '{{ aws_region }}'
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
+      region: "{{ aws_region }}"
-  run_once: True
+  run_once: true
   block:
     # ============================================================
-  - name: Get available AZs
-    aws_az_info:
-      filters:
-        region-name: '{{ aws_region }}'
-    register: _az_info
+    - name: Get available AZs
+      amazon.aws.aws_az_info:
+        filters:
+          region-name: "{{ aws_region }}"
+      register: _az_info

-  - name: Pick an AZ
-    set_fact:
-      ec2_availability_zone_names: '{{ _az_info.availability_zones | selectattr("zone_name", "defined") | map(attribute="zone_name") | list }}'
+    - name: Pick an AZ
+      ansible.builtin.set_fact:
+        ec2_availability_zone_names: '{{ _az_info.availability_zones | selectattr("zone_name", "defined") | map(attribute="zone_name") | list }}'

-  # ============================================================
+    # ============================================================

-  - name: Get a list of images
-    ec2_ami_info:
-      filters:
-        name: '{{ ec2_ami_name }}'
-        owner-id: '{{ ec2_ami_owner_id }}'
-        architecture: x86_64
-        virtualization-type: hvm
-        root-device-type: ebs
-    register: _images_info
-    # Very spammy
-    no_log: True
+    - name: Get a list of images
+      amazon.aws.ec2_ami_info:
+        filters:
+          name: "{{ ec2_ami_name }}"
+          owner-id: "{{ ec2_ami_owner_id }}"
+          architecture: x86_64
+          virtualization-type: hvm
+          root-device-type: ebs
+      register: _images_info
+      # Very spammy
+      no_log: true

-  - name: Set Fact for latest AMI
-    vars:
-      latest_image: '{{ _images_info.images | sort(attribute="creation_date") | reverse | first }}'
-    set_fact:
-      ec2_ami_id: '{{ latest_image.image_id }}'
-      ec2_ami_details: '{{ latest_image }}'
-      ec2_ami_root_disk: '{{ latest_image.block_device_mappings[0].device_name }}'
-      ec2_ami_ssh_user: '{{ ec2_ami_ssh_user }}'
+    - name: Set Fact for latest AMI
+      vars:
+        latest_image: '{{ _images_info.images | sort(attribute="creation_date") | reverse | first }}'
+      ansible.builtin.set_fact:
+        ec2_ami_id: "{{ latest_image.image_id }}"
+        ec2_ami_details: "{{ latest_image }}"
+        ec2_ami_root_disk: "{{ latest_image.block_device_mappings[0].device_name }}"
+        ec2_ami_ssh_user: "{{ ec2_ami_ssh_user }}"
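Beyond the credential rename, this refactor names the outer block, moves every action to its fully-qualified collection name (amazon.aws.aws_az_info, amazon.aws.ec2_ami_info, ansible.builtin.set_fact), and keeps the "newest matching AMI" selection logic. A condensed sketch of that selection, assuming _images_info was registered by ec2_ami_info as in the hunk above; note that sort(attribute="creation_date") | reverse | first is equivalent to taking the last element of the sorted list:

    - name: Pick the most recently published AMI (sketch)
      vars:
        latest_image: "{{ _images_info.images | sort(attribute='creation_date') | last }}"
      ansible.builtin.set_fact:
        ec2_ami_id: "{{ latest_image.image_id }}"
        ec2_ami_root_disk: "{{ latest_image.block_device_mappings[0].device_name }}"

Sorting by creation_date rather than hardcoding an AMI ID is what lets these tests run unchanged in any region, as the comment at the top of the file explains.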
diff --git a/ansible_collections/community/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py b/ansible_collections/community/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py
index ea2f51b0f..04d2eb1ea 100644
--- a/ansible_collections/community/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py
+++ b/ansible_collections/community/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py
@@ -8,24 +8,26 @@ ssh-keygen -f id_rsa.pub -e -m PKCS8 | openssl pkey -pubin -outform DER | openss
 (but without needing the OpenSSL CLI)
 """
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
 import hashlib
 import sys
-from Crypto.PublicKey import RSA
+
+from cryptography.hazmat.primitives import serialization

 if len(sys.argv) == 0:
     ssh_public_key = "id_rsa.pub"
 else:
     ssh_public_key = sys.argv[1]

-with open(ssh_public_key, 'r') as key_fh:
-    data = key_fh.read()
-
-# Convert from SSH format to DER format
-public_key = RSA.importKey(data).exportKey('DER')
-md5digest = hashlib.md5(public_key).hexdigest()
+with open(ssh_public_key, "rb") as key_file:
+    public_key = serialization.load_ssh_public_key(
+        key_file.read(),
+    )
+pub_der = public_key.public_bytes(
+    encoding=serialization.Encoding.DER,
+    format=serialization.PublicFormat.SubjectPublicKeyInfo,
+)
+md5digest = hashlib.md5(pub_der).hexdigest()
 # Format the md5sum into the normal format
 pairs = zip(md5digest[::2], md5digest[1::2])
 md5string = ":".join(["".join(pair) for pair in pairs])
diff --git a/ansible_collections/community/aws/tests/integration/targets/sns/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/sns/tasks/main.yml
index 42ef9b190..99be6b218 100644
--- a/ansible_collections/community/aws/tests/integration/targets/sns/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/sns/tasks/main.yml
@@ -1,9 +1,9 @@
 - name: set up AWS connection info
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   collections:
     - amazon.aws
diff --git a/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py b/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py
index 98f657836..99c6a8105 100644
--- a/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py
+++ b/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py
@@ -1,6 +1,9 @@
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 __metaclass__ = type
diff --git a/ansible_collections/community/aws/tests/integration/targets/sns_topic/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/sns_topic/tasks/main.yml
index d5b389e4d..00f3f71d9 100644
--- a/ansible_collections/community/aws/tests/integration/targets/sns_topic/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/sns_topic/tasks/main.yml
@@ -1,8 +1,8 @@
 - module_defaults:
     group/aws:
-      aws_secret_key: '{{ aws_secret_key }}'
-      aws_access_key: '{{ aws_access_key }}'
-      security_token: '{{ security_token|default(omit) }}'
+      secret_key: '{{ aws_secret_key }}'
+      access_key: '{{ aws_access_key }}'
+      session_token: '{{ security_token|default(omit) }}'
       region: '{{ aws_region }}'
   block:
@@ -62,7 +62,7 @@
       that:
         - sns_topic_info is successful
        - "'result' in sns_topic_info"
-        - sns_topic_info.result["sns_arn"] == "{{ sns_arn }}"
+        - sns_topic_info.result["sns_arn"] == sns_arn
        - "'sns_topic' in sns_topic_info.result"
        - "'display_name' in sns_topic_info.result['sns_topic']"
        - sns_topic_info.result["sns_topic"]["display_name"] == "My topic name"
@@ -79,7 +79,7 @@
       that:
         - sns_topic_info is successful
        - "'result' in sns_topic_info"
-        - sns_topic_info.result["sns_arn"] == "{{ sns_arn }}"
+        - sns_topic_info.result["sns_arn"] == sns_arn
        - "'sns_topic' in sns_topic_info.result"
        - "'display_name' in sns_topic_info.result['sns_topic']"
        - sns_topic_info.result["sns_topic"]["display_name"] == "My topic name"
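The assertion rewrites in this file drop redundant Jinja2 templating: entries under that: are already evaluated as Jinja2 expressions, so "{{ sns_arn }}" becomes a bare sns_arn reference, and string building uses the concatenation operator ~ instead of interpolating inside a quoted literal (newer ansible-core versions warn about templates embedded in conditionals). A minimal sketch of the resulting pattern, using the same registered variables as the hunks here:

    - name: Compare registered results without extra templating (sketch)
      assert:
        that:
          - sns_topic_info.result["sns_arn"] == sns_arn
          - sns_fifo_topic.sns_topic.name == sns_topic_topic_name ~ '-fifo'

The same change shows up later in this diff wherever a test compared a result field against a templated string.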
@@ -110,7 +110,7 @@
       that:
        - sns_fifo_topic.changed
        - sns_fifo_topic.sns_topic.topic_type == 'fifo'
-        - sns_fifo_topic.sns_topic.name == '{{ sns_topic_topic_name }}-fifo'
+        - sns_fifo_topic.sns_topic.name == sns_topic_topic_name ~ '-fifo'

 - name: Run create a FIFO topic again for idempotence test (with .fifo)
   sns_topic:
diff --git a/ansible_collections/community/aws/tests/integration/targets/sqs_queue/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/sqs_queue/tasks/main.yml
index bcba06c8f..4c16be313 100644
--- a/ansible_collections/community/aws/tests/integration/targets/sqs_queue/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/sqs_queue/tasks/main.yml
@@ -3,9 +3,9 @@
   module_defaults:
     group/aws:
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
       region: "{{ aws_region }}"
   block:
@@ -19,7 +19,7 @@
     assert:
       that:
         - create_result.changed
-        - create_result.region == "{{ aws_region }}"
+        - create_result.region == aws_region
   always:
     - name: Test deleting SQS queue
diff --git a/ansible_collections/community/aws/tests/integration/targets/ssm_parameter/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/ssm_parameter/tasks/main.yml
index ac461392a..7c0e27fee 100644
--- a/ansible_collections/community/aws/tests/integration/targets/ssm_parameter/tasks/main.yml
+++ b/ansible_collections/community/aws/tests/integration/targets/ssm_parameter/tasks/main.yml
@@ -3,9 +3,9 @@
 # As a lookup plugin we don't have access to module_defaults
   connection_args:
     region: "{{ aws_region }}"
-    aws_access_key: "{{ aws_access_key }}"
-    aws_secret_key: "{{ aws_secret_key }}"
-    aws_security_token: "{{ security_token | default(omit) }}"
+    access_key: "{{ aws_access_key }}"
+    secret_key: "{{ aws_secret_key }}"
+    session_token: "{{ security_token | default(omit) }}"
   no_log: True

- name: 'aws_ssm lookup plugin integration tests'
@@ -13,9 +13,9 @@
     - amazon.aws
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      session_token: '{{ security_token | default(omit) }}'
       region: '{{ aws_region }}'
   vars:
     simple_name: '/{{ ssm_key_prefix }}/Simple'
@@ -87,7 +87,7 @@
     # Create
     - name: Create key/value pair in aws parameter store (CHECK)
-      aws_ssm_parameter_store:
+      ssm_parameter:
         name: '{{ simple_name }}'
         description: '{{ simple_description }}'
         value: '{{ simple_value }}'
@@ -98,7 +98,7 @@
         - result is changed

     - name: Create key/value pair in aws parameter store
-      aws_ssm_parameter_store:
+      ssm_parameter:
         name: '{{ simple_name }}'
         description: '{{ simple_description }}'
         value: '{{ simple_value }}'
@@ -129,7 +129,7 @@
         - result.parameter_metadata.type == 'String'

     - name: Create key/value pair in aws parameter store - idempotency (CHECK)
-      aws_ssm_parameter_store:
+      ssm_parameter:
         name: '{{ simple_name }}'
         description: '{{ simple_description }}'
         value: '{{ simple_value }}'
@@ -140,7 +140,7 @@
         - result is not changed

     - name: Create key/value pair in aws parameter store - idempotency
-      aws_ssm_parameter_store:
+      ssm_parameter:
         name: '{{ simple_name }}'
         description: '{{ simple_description }}'
         value: '{{ simple_value }}'
@@ -174,7 +174,7 @@
     # Update description
     - name: Update description
(CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' description: '{{ updated_description }}' register: result @@ -184,7 +184,7 @@ - result is changed - name: Update description - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' description: '{{ updated_description }}' register: result @@ -214,7 +214,7 @@ - result.parameter_metadata.type == 'String' - name: Update description - idempotency (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' description: '{{ updated_description }}' register: result @@ -224,7 +224,7 @@ - result is not changed - name: Update description - idempotency - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' description: '{{ updated_description }}' register: result @@ -258,7 +258,7 @@ # Update value - name: Update key/value pair in aws parameter store (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' value: '{{ updated_value }}' register: result @@ -268,7 +268,7 @@ - result is changed - name: Update key/value pair in aws parameter store - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' value: '{{ updated_value }}' register: result @@ -298,7 +298,7 @@ - result.parameter_metadata.type == 'String' - name: Update key/value pair in aws parameter store - idempotency (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' value: '{{ updated_value }}' register: result @@ -308,7 +308,7 @@ - result is not changed - name: Update key/value pair in aws parameter store - idempotency - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' value: '{{ updated_value }}' register: result @@ -341,7 +341,7 @@ # Complex update - name: Complex update to key/value pair in aws parameter store (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' value: '{{ simple_value }}' description: '{{ simple_description }}' @@ -352,7 +352,7 @@ - result is changed - name: Complex update to key/value pair in aws parameter store - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' value: '{{ simple_value }}' description: '{{ simple_description }}' @@ -383,7 +383,7 @@ - result.parameter_metadata.type == 'String' - name: Complex update to key/value pair in aws parameter store - idempotency (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' value: '{{ simple_value }}' description: '{{ simple_description }}' @@ -394,7 +394,7 @@ - result is not changed - name: Complex update to key/value pair in aws parameter store - idempotency - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' value: '{{ simple_value }}' description: '{{ simple_description }}' @@ -428,7 +428,7 @@ # Delete - name: Delete key/value pair in aws parameter store (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' state: absent register: result @@ -438,7 +438,7 @@ - result is changed - name: Delete key/value pair in aws parameter store - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' state: absent register: result @@ -454,7 +454,7 @@ - info_result is failed - name: Delete key/value pair in aws parameter store - idempotency (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' state: absent register: result @@ -464,7 +464,7 @@ - result is not changed - name: Delete key/value pair in aws parameter store - idempotency - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' state: absent register: result @@ -474,7 
+474,7 @@ - result is not changed - name: Create key/value pair in aws parameter store with no description - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' value: '{{ simple_value }}' register: result @@ -485,7 +485,7 @@ - '"description" not in result.parameter_metadata' - name: Add a description - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_name }}' value: '{{ simple_value }}' description: '{{ simple_description }}' @@ -501,7 +501,7 @@ # Test tags - Create parameter with tags case - name: Create parameter with tags case - Create parameter (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -513,7 +513,7 @@ - result is changed - name: Create parameter with tags case - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -523,7 +523,7 @@ - name: Create parameter with tags case - Ensure tags is correct assert: that: - - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_orig['{{ item.key }}'] + - result.parameter_metadata.tags[item.key] == simple_tags_orig[item.key] loop: "{{ simple_tags_orig | dict2items }}" - name: Create parameter with tags case - Ensure no missing or additional tags @@ -560,7 +560,7 @@ # Test tags - Update description only case - name: Update description only case - Update parameter (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_updated_description }}' register: result @@ -570,7 +570,7 @@ - result is changed - name: Update description only case - Update parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_updated_description }}' register: result @@ -578,7 +578,7 @@ - name: Update description only case - Ensure expected tags is correct assert: that: - - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_orig['{{ item.key }}'] + - result.parameter_metadata.tags[item.key] == simple_tags_orig[item.key] loop: "{{ simple_tags_orig | dict2items }}" - name: Update description only case - Ensure no missing or additional tags @@ -615,7 +615,7 @@ # Test tags - Add tag to existing parameter case - name: Add tag to existing parameter case - Update parameter (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_add_owner }}' register: result @@ -625,7 +625,7 @@ - result is changed - name: Add tag to existing parameter case - Update parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_add_owner }}' register: result @@ -633,7 +633,7 @@ - name: Add tag to existing parameter case - Ensure tags correct assert: that: - - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_add_owner['{{ item.key }}'] + - result.parameter_metadata.tags[item.key] == simple_tags_add_owner[item.key] loop: "{{ simple_tags_add_owner | dict2items }}" - name: Add tag to existing parameter case - Ensure no missing or additional tags @@ -667,7 +667,7 @@ - result.parameter_metadata.type == 'String' - name: Add tag to existing parameter case - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -678,7 +678,7 @@ # Test tags - update tags only - change tag - name: Change 
single tag case - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -686,7 +686,7 @@ register: result - name: Change single tag case - Update tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_change_environment }}' register: result @@ -696,7 +696,7 @@ - result is changed - name: Change single tag case - Update tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_change_environment }}' register: result @@ -704,7 +704,7 @@ - name: Change single tag case - Ensure expected tags is correct assert: that: - - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_change_environment['{{ item.key }}'] + - result.parameter_metadata.tags[item.key] == simple_tags_change_environment[item.key] loop: "{{ simple_tags_change_environment | dict2items }}" - name: Change single tag case - Ensure no missing or additional tags @@ -738,7 +738,7 @@ - result.parameter_metadata.type == 'String' - name: Change single tag case - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -749,7 +749,7 @@ # Test tags - delete tag case - name: Delete single tag case - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -757,7 +757,7 @@ register: result - name: Delete single tag case - Update tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_delete_version }}' register: result @@ -767,7 +767,7 @@ - result is changed - name: Delete single tag case - Update tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_delete_version }}' register: result @@ -775,7 +775,7 @@ - name: Delete single tag case - Ensure expected tags is correct assert: that: - - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_delete_version['{{ item.key }}'] + - result.parameter_metadata.tags[item.key] == simple_tags_delete_version[item.key] loop: "{{ simple_tags_delete_version | dict2items }}" - name: Delete single tag case - Ensure no missing or additional tags @@ -809,7 +809,7 @@ - result.parameter_metadata.type == 'String' - name: Delete single tag case - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -820,7 +820,7 @@ # Test tags - delete tag w/ spaces case - name: Delete single tag w/ spaces case - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -828,7 +828,7 @@ register: result - name: Delete single tag w/ spaces case - Update tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_delete_tag_with_space }}' register: result @@ -838,7 +838,7 @@ - result is changed - name: Delete single tag w/ spaces case - Update tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_delete_tag_with_space }}' register: result @@ -846,7 +846,7 @@ - name: Delete single tag w/ spaces case - Ensure expected tags is correct assert: that: - - result.parameter_metadata.tags['{{ item.key 
}}'] == simple_tags_delete_tag_with_space['{{ item.key }}'] + - result.parameter_metadata.tags[item.key] == simple_tags_delete_tag_with_space[item.key] loop: "{{ simple_tags_delete_tag_with_space | dict2items }}" - name: Delete single tag w/ spaces case - Ensure no missing or additional tags @@ -880,7 +880,7 @@ - result.parameter_metadata.type == 'String' - name: Delete single tag w/ spaces case - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -891,7 +891,7 @@ # Test tags - Add/delete/change tags case - name: Add/delete/change tags case - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -899,7 +899,7 @@ register: result - name: Add/delete/change tags case - Update tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_add_delete_change }}' register: result @@ -909,7 +909,7 @@ - result is changed - name: Add/delete/change tags case - Update tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_add_delete_change }}' register: result @@ -917,7 +917,7 @@ - name: Add/delete/change tags case - Ensure expected tags is correct assert: that: - - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_add_delete_change['{{ item.key }}'] + - result.parameter_metadata.tags[item.key] == simple_tags_add_delete_change[item.key] loop: "{{ simple_tags_add_delete_change | dict2items }}" - name: Add/delete/change tags case - Ensure no missing or additional tags @@ -951,7 +951,7 @@ - result.parameter_metadata.type == 'String' - name: Add/delete/change tags case - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -962,7 +962,7 @@ # Test tags - Delete all tags case - name: Delete all tags case - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -970,7 +970,7 @@ register: result - name: Delete all tags case - Update tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_delete_all_tags }}' register: result @@ -980,7 +980,7 @@ - result is changed - name: Delete all tags case - Update tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_delete_all_tags }}' register: result @@ -988,7 +988,7 @@ - name: Delete all tags case - Ensure expected tags is correct assert: that: - - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_delete_all_tags['{{ item.key }}'] + - result.parameter_metadata.tags[item.key] == simple_tags_delete_all_tags[item.key] loop: "{{ simple_tags_delete_all_tags | dict2items }}" - name: Delete all tags case - Ensure no missing or additional tags @@ -1022,7 +1022,7 @@ - result.parameter_metadata.type == 'String' - name: Delete all tags case - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -1033,7 +1033,7 @@ # Test tags - Add tag case (purge_tags=false) - name: Add tag case (purge_tags=false) - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -1041,7 
+1041,7 @@ register: result - name: Add tag case (purge_tags=false) - Add tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_purge_false_add_owner }}' purge_tags: False @@ -1052,7 +1052,7 @@ - result is changed - name: Add tag case (purge_tags=false) - Add tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_purge_false_add_owner }}' purge_tags: False @@ -1062,8 +1062,8 @@ assert: that: - > - result.parameter_metadata.tags['{{ item.key }}'] == - (simple_tags_orig | combine(simple_tags_purge_false_add_owner))['{{ item.key }}'] + result.parameter_metadata.tags[item.key] == + (simple_tags_orig | combine(simple_tags_purge_false_add_owner))[item.key] loop: > {{ simple_tags_orig | combine(simple_tags_purge_false_add_owner) | dict2items }} @@ -1071,8 +1071,8 @@ assert: that: - > - result.parameter_metadata.tags | length == {{ simple_tags_orig | - combine(simple_tags_purge_false_add_owner) | dict2items }} | length + result.parameter_metadata.tags | length == simple_tags_orig | + combine(simple_tags_purge_false_add_owner) | dict2items | length - name: Add tag case (purge_tags=false) - Lookup a tagged parameter set_fact: @@ -1100,7 +1100,7 @@ - result.parameter_metadata.type == 'String' - name: Add tag case (purge_tags=false) - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -1111,7 +1111,7 @@ # Test tags - Add multiple tags case (purge_tags=false) - name: Add multiple tags case (purge_tags=false) - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -1119,7 +1119,7 @@ register: result - name: Add multiple tags case (purge_tags=false) - Add tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_purge_false_add_multiple }}' purge_tags: False @@ -1130,7 +1130,7 @@ - result is changed - name: Add multiple tags case (purge_tags=false) - Add tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_purge_false_add_multiple }}' purge_tags: False @@ -1140,8 +1140,8 @@ assert: that: - > - result.parameter_metadata.tags['{{ item.key }}'] == - (simple_tags_orig | combine(simple_tags_purge_false_add_multiple))['{{ item.key }}'] + result.parameter_metadata.tags[item.key] == + (simple_tags_orig | combine(simple_tags_purge_false_add_multiple))[item.key] loop: > {{ simple_tags_orig | combine(simple_tags_purge_false_add_multiple) | dict2items }} @@ -1149,8 +1149,8 @@ assert: that: - > - result.parameter_metadata.tags | length == {{ simple_tags_orig | - combine(simple_tags_purge_false_add_multiple) | dict2items }} | length + result.parameter_metadata.tags | length == simple_tags_orig | + combine(simple_tags_purge_false_add_multiple) | dict2items | length - name: Add multiple tags case (purge_tags=false) - Lookup a tagged parameter set_fact: @@ -1178,7 +1178,7 @@ - result.parameter_metadata.type == 'String' - name: Add multiple tags case (purge_tags=false) - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -1189,7 +1189,7 @@ # Test tags - Change tag case (purge_tags=false) - name: Change tag case (purge_tags=false) - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ 
simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -1197,7 +1197,7 @@ register: result - name: Change tag case (purge_tags=false) - Change tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_purge_false_change_environment}}' purge_tags: False @@ -1208,7 +1208,7 @@ - result is changed - name: Change tag case (purge_tags=false) - Change tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_purge_false_change_environment }}' purge_tags: False @@ -1218,8 +1218,8 @@ assert: that: - > - result.parameter_metadata.tags['{{ item.key }}'] == - (simple_tags_orig | combine(simple_tags_purge_false_change_environment))['{{ item.key }}'] + result.parameter_metadata.tags[item.key] == + (simple_tags_orig | combine(simple_tags_purge_false_change_environment))[item.key] loop: > {{ simple_tags_orig | combine(simple_tags_purge_false_change_environment) | dict2items }} loop_control: @@ -1230,8 +1230,8 @@ assert: that: - > - result.parameter_metadata.tags | length == {{ simple_tags_orig | - combine(simple_tags_purge_false_change_environment) | dict2items }} | length + result.parameter_metadata.tags | length == simple_tags_orig | + combine(simple_tags_purge_false_change_environment) | dict2items | length - name: Change tag case (purge_tags=false) - Lookup a tagged parameter set_fact: @@ -1259,7 +1259,7 @@ - result.parameter_metadata.type == 'String' - name: Change tag case (purge_tags=false) - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -1270,7 +1270,7 @@ # Test tags - Change multiple tags case (purge_tags=false) - name: Change multiple tags (purge_tags=false) - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -1278,7 +1278,7 @@ register: result - name: Change multiple tags (purge_tags=false) - Change tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_purge_false_change_multiple}}' purge_tags: False @@ -1289,7 +1289,7 @@ - result is changed - name: Change multiple tags (purge_tags=false) - Change tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_purge_false_change_multiple }}' purge_tags: False @@ -1299,8 +1299,8 @@ assert: that: - > - result.parameter_metadata.tags['{{ item.key }}'] == - (simple_tags_orig | combine(simple_tags_purge_false_change_multiple))['{{ item.key }}'] + result.parameter_metadata.tags[item.key] == + (simple_tags_orig | combine(simple_tags_purge_false_change_multiple))[item.key] loop: > {{ simple_tags_orig | combine(simple_tags_purge_false_change_multiple) | dict2items }} loop_control: @@ -1311,8 +1311,8 @@ assert: that: - > - result.parameter_metadata.tags | length == {{ simple_tags_orig | - combine(simple_tags_purge_false_change_multiple) | dict2items }} | length + result.parameter_metadata.tags | length == simple_tags_orig | + combine(simple_tags_purge_false_change_multiple) | dict2items | length - name: Change multiple tags (purge_tags=false) - Lookup a tagged parameter set_fact: @@ -1340,7 +1340,7 @@ - result.parameter_metadata.type == 'String' - name: Change multiple tags (purge_tags=false) - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -1351,7 +1351,7 @@ # 
Test tags - Add/Change multiple tags case (purge_tags=false) - name: Add/Change multiple tags (purge_tags=false) - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -1359,7 +1359,7 @@ register: result - name: Add/Change multiple tags (purge_tags=false) - Change tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_purge_false_add_and_change}}' purge_tags: False @@ -1370,7 +1370,7 @@ - result is changed - name: Add/Change multiple tags (purge_tags=false) - Change tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: '{{ simple_tags_purge_false_add_and_change }}' purge_tags: False @@ -1380,8 +1380,8 @@ assert: that: - > - result.parameter_metadata.tags['{{ item.key }}'] == - (simple_tags_orig | combine(simple_tags_purge_false_add_and_change))['{{ item.key }}'] + result.parameter_metadata.tags[item.key] == + (simple_tags_orig | combine(simple_tags_purge_false_add_and_change))[item.key] loop: > {{ simple_tags_orig | combine(simple_tags_purge_false_add_and_change) | dict2items }} loop_control: @@ -1392,8 +1392,8 @@ assert: that: - > - result.parameter_metadata.tags | length == {{ simple_tags_orig | - combine(simple_tags_purge_false_add_and_change) | dict2items }} | length + result.parameter_metadata.tags | length == simple_tags_orig | + combine(simple_tags_purge_false_add_and_change) | dict2items | length - name: Add/Change multiple tags (purge_tags=false) - Lookup a tagged parameter set_fact: @@ -1421,7 +1421,7 @@ - result.parameter_metadata.type == 'String' - name: Add/Change multiple tags (purge_tags=false) - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -1432,7 +1432,7 @@ # Test tags - Empty tags dict case (purge_tags=false) # should be no change - name: Empty tags dict (purge_tags=false) - Create parameter - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -1440,7 +1440,7 @@ register: result - name: Empty tags dict (purge_tags=false) - Change tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: {} purge_tags: False @@ -1451,7 +1451,7 @@ - result != 'changed' - name: Empty tags dict (purge_tags=false) - Change tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' tags: {} purge_tags: False @@ -1461,7 +1461,7 @@ assert: that: - > - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_orig['{{ item.key }}'] + result.parameter_metadata.tags[item.key] == simple_tags_orig[item.key] loop: > {{ simple_tags_orig | dict2items }} loop_control: @@ -1472,7 +1472,7 @@ that: - > result.parameter_metadata.tags | length - == {{ simple_tags_orig | dict2items }} | length + == simple_tags_orig | dict2items | length - name: Empty tags dict (purge_tags=false) - Lookup a tagged parameter set_fact: @@ -1500,7 +1500,7 @@ - result.parameter_metadata.type == 'String' - name: Empty tags dict (purge_tags=false) - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True @@ -1511,7 +1511,7 @@ # Test tags - No tags parameter (purge_tags=true) case # should be no change - name: No tags parameter (purge_tags=true) - Create parameter - aws_ssm_parameter_store: + 
ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_description }}' value: '{{ simple_tag_param_value }}' @@ -1519,7 +1519,7 @@ register: result - name: No tags parameter (purge_tags=true) - Change tag (CHECK) - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_updated_description }}' register: result @@ -1529,7 +1529,7 @@ - result is changed - name: No tags parameter (purge_tags=true) - Change tag - aws_ssm_parameter_store: + ssm_parameter: name: '{{ simple_tag_param_name }}' description: '{{ simple_tag_param_updated_description }}' register: result @@ -1538,8 +1538,8 @@ assert: that: - > - result.parameter_metadata.tags['{{ item.key }}'] - == simple_tags_orig['{{ item.key }}'] + result.parameter_metadata.tags[item.key] + == simple_tags_orig[item.key] loop: > {{ simple_tags_orig | dict2items }} loop_control: @@ -1550,7 +1550,7 @@ that: - > result.parameter_metadata.tags | length - == {{ simple_tags_orig | dict2items }} | length + == simple_tags_orig | dict2items | length - name: No tags parameter (purge_tags=true) - Lookup a tagged parameter set_fact: @@ -1578,7 +1578,7 @@ - result.parameter_metadata.type == 'String' - name: No tags parameter (purge_tags=true) - Delete parameter - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: true @@ -1588,7 +1588,7 @@ always: # ============================================================ - name: Delete remaining key/value pairs in aws parameter store - aws_ssm_parameter_store: + ssm_parameter: name: "{{item}}" state: absent ignore_errors: True diff --git a/ansible_collections/community/aws/tests/integration/targets/stepfunctions_state_machine/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/stepfunctions_state_machine/tasks/main.yml index 8c4bbec71..061acb2c3 100644 --- a/ansible_collections/community/aws/tests/integration/targets/stepfunctions_state_machine/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/stepfunctions_state_machine/tasks/main.yml @@ -3,9 +3,9 @@ - name: Integration test for AWS Step Function state machine module module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" collections: - amazon.aws @@ -33,7 +33,7 @@ # ==== Tests =================================================== - name: Create a new state machine -- check_mode - aws_step_functions_state_machine: + stepfunctions_state_machine: name: "{{ state_machine_name }}" definition: "{{ lookup('file','state_machine.json') }}" role_arn: "{{ step_functions_role.iam_role.arn }}" @@ -49,7 +49,7 @@ - creation_check.output == 'State machine would be created.' 
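Every state-machine operation in this target is exercised twice under the renamed community.aws.stepfunctions_state_machine module: once in check mode, where the module must predict the change without calling AWS, and once for real, with an assert after each run. A minimal sketch of the pattern, reusing the variables this target defines (the check_mode placement is an assumption based on the "-- check_mode" task names; the diff does not show that line):

    - name: Create a new state machine -- check mode (sketch)
      stepfunctions_state_machine:
        name: "{{ state_machine_name }}"
        definition: "{{ lookup('file', 'state_machine.json') }}"
        role_arn: "{{ step_functions_role.iam_role.arn }}"
      register: creation_check
      check_mode: yes

    - name: The module must report a pending change without creating anything
      assert:
        that:
          - creation_check is changed
          - creation_check.output == 'State machine would be created.'

Running the check-mode task first means a regression in change detection fails the test before any AWS resources are touched.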
- name: Create a new state machine - aws_step_functions_state_machine: + stepfunctions_state_machine: name: "{{ state_machine_name }}" definition: "{{ lookup('file','state_machine.json') }}" role_arn: "{{ step_functions_role.iam_role.arn }}" @@ -68,7 +68,7 @@ seconds: 5 - name: Idempotent rerun of same state function -- check_mode - aws_step_functions_state_machine: + stepfunctions_state_machine: name: "{{ state_machine_name }}" definition: "{{ lookup('file','state_machine.json') }}" role_arn: "{{ step_functions_role.iam_role.arn }}" @@ -84,7 +84,7 @@ - result.output == 'State is up-to-date.' - name: Idempotent rerun of same state function - aws_step_functions_state_machine: + stepfunctions_state_machine: name: "{{ state_machine_name }}" definition: "{{ lookup('file','state_machine.json') }}" role_arn: "{{ step_functions_role.iam_role.arn }}" @@ -99,7 +99,7 @@ - result.state_machine_arn == creation_output.state_machine_arn - name: Update an existing state machine -- check_mode - aws_step_functions_state_machine: + stepfunctions_state_machine: name: "{{ state_machine_name }}" definition: "{{ lookup('file','alternative_state_machine.json') }}" role_arn: "{{ step_functions_role.iam_role.arn }}" @@ -112,10 +112,10 @@ - assert: that: - update_check.changed == True - - "update_check.output == 'State machine would be updated: {{ creation_output.state_machine_arn }}'" + - "update_check.output == 'State machine would be updated: ' ~ creation_output.state_machine_arn" - name: Update an existing state machine - aws_step_functions_state_machine: + stepfunctions_state_machine: name: "{{ state_machine_name }}" definition: "{{ lookup('file','alternative_state_machine.json') }}" role_arn: "{{ step_functions_role.iam_role.arn }}" @@ -130,7 +130,7 @@ - update_output.state_machine_arn == creation_output.state_machine_arn - name: Start execution of state machine -- check_mode - aws_step_functions_state_machine_execution: + stepfunctions_state_machine_execution: name: "{{ execution_name }}" execution_input: "{}" state_machine_arn: "{{ creation_output.state_machine_arn }}" @@ -143,7 +143,7 @@ - "start_execution_output.output == 'State machine execution would be started.'" - name: Start execution of state machine - aws_step_functions_state_machine_execution: + stepfunctions_state_machine_execution: name: "{{ execution_name }}" execution_input: "{}" state_machine_arn: "{{ creation_output.state_machine_arn }}" @@ -156,7 +156,7 @@ - "'start_date' in start_execution_output" - name: Start execution of state machine (check for idempotency) (check mode) - aws_step_functions_state_machine_execution: + stepfunctions_state_machine_execution: name: "{{ execution_name }}" execution_input: "{}" state_machine_arn: "{{ creation_output.state_machine_arn }}" @@ -169,7 +169,7 @@ - "start_execution_output_idem_check.output == 'State machine execution already exists.'" - name: Start execution of state machine (check for idempotency) - aws_step_functions_state_machine_execution: + stepfunctions_state_machine_execution: name: "{{ execution_name }}" execution_input: "{}" state_machine_arn: "{{ creation_output.state_machine_arn }}" @@ -180,7 +180,7 @@ - not start_execution_output_idem.changed - name: Stop execution of state machine -- check_mode - aws_step_functions_state_machine_execution: + stepfunctions_state_machine_execution: action: stop execution_arn: "{{ start_execution_output.execution_arn }}" cause: "cause of the failure" @@ -194,7 +194,7 @@ - "stop_execution_output.output == 'State machine execution would be stopped.'" - 
name: Stop execution of state machine - aws_step_functions_state_machine_execution: + stepfunctions_state_machine_execution: action: stop execution_arn: "{{ start_execution_output.execution_arn }}" cause: "cause of the failure" @@ -207,7 +207,7 @@ - "'stop_date' in stop_execution_output" - name: Stop execution of state machine (check for idempotency) - aws_step_functions_state_machine_execution: + stepfunctions_state_machine_execution: action: stop execution_arn: "{{ start_execution_output.execution_arn }}" cause: "cause of the failure" @@ -219,7 +219,7 @@ - not stop_execution_output.changed - name: Try stopping a non-running execution -- check_mode - aws_step_functions_state_machine_execution: + stepfunctions_state_machine_execution: action: stop execution_arn: "{{ start_execution_output.execution_arn }}" cause: "cause of the failure" @@ -233,7 +233,7 @@ - "stop_execution_output.output == 'State machine execution is not running.'" - name: Try stopping a non-running execution - aws_step_functions_state_machine_execution: + stepfunctions_state_machine_execution: action: stop execution_arn: "{{ start_execution_output.execution_arn }}" cause: "cause of the failure" @@ -246,7 +246,7 @@ - not stop_execution_output.changed - name: Start execution of state machine with the same execution name - aws_step_functions_state_machine_execution: + stepfunctions_state_machine_execution: name: "{{ execution_name }}" state_machine_arn: "{{ creation_output.state_machine_arn }}" register: start_execution_output_again @@ -256,7 +256,7 @@ - not start_execution_output_again.changed - name: Remove state machine -- check_mode - aws_step_functions_state_machine: + stepfunctions_state_machine: name: "{{ state_machine_name }}" state: absent register: deletion_check @@ -265,10 +265,10 @@ - assert: that: - deletion_check.changed == True - - "deletion_check.output == 'State machine would be deleted: {{ creation_output.state_machine_arn }}'" + - "deletion_check.output == 'State machine would be deleted: ' ~ creation_output.state_machine_arn" - name: Remove state machine - aws_step_functions_state_machine: + stepfunctions_state_machine: name: "{{ state_machine_name }}" state: absent register: deletion_output @@ -279,7 +279,7 @@ - deletion_output.state_machine_arn == creation_output.state_machine_arn - name: Non-existent state machine is absent - aws_step_functions_state_machine: + stepfunctions_state_machine: name: "non_existing_state_machine" state: absent register: result @@ -293,7 +293,7 @@ always: - name: Cleanup - delete state machine - aws_step_functions_state_machine: + stepfunctions_state_machine: name: "{{ state_machine_name }}" state: absent ignore_errors: true diff --git a/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/defaults/main.yml b/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/defaults/main.yml deleted file mode 100644 index 17072d6a4..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ -iam_role_name: "ansible-test-{{ tiny_prefix }}" diff --git a/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/meta/main.yml b/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/meta/main.yml deleted file mode 100644 index 32cf5dda7..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/meta/main.yml +++ /dev/null @@ -1 +0,0 @@ -dependencies: [] diff --git 
a/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/tasks/main.yml deleted file mode 100644 index be684dcea..000000000 --- a/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/tasks/main.yml +++ /dev/null @@ -1,332 +0,0 @@ ---- -# tasks file for sts_assume_role - -- module_defaults: - group/aws: - region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" - collections: - - amazon.aws - block: - # Get some information about who we are before starting our tests - # we'll need this as soon as we start working on the policies - - name: get ARN of calling user - aws_caller_info: - register: aws_caller_info - - - name: register account id - set_fact: - aws_account: "{{ aws_caller_info.account }}" - - # ============================================================ - - name: create test iam role - iam_role: - name: "{{ iam_role_name }}" - assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}" - create_instance_profile: False - managed_policy: - - arn:aws:iam::aws:policy/IAMReadOnlyAccess - state: present - register: test_role - - # ============================================================ - - name: pause to ensure role exists before using - pause: - seconds: 30 - - # ============================================================ - - name: test with no parameters - sts_assume_role: - aws_access_key: '{{ omit }}' - aws_secret_key: '{{ omit }}' - security_token: '{{ omit }}' - register: result - ignore_errors: true - - - name: assert with no parameters - assert: - that: - - 'result.failed' - - "'missing required arguments:' in result.msg" - - # ============================================================ - - name: test with empty parameters - sts_assume_role: - role_arn: - role_session_name: - policy: - duration_seconds: - external_id: - mfa_token: - mfa_serial_number: - register: result - ignore_errors: true - - - name: assert with empty parameters - assert: - that: - - 'result.failed' - - "'Missing required parameter in input:' in result.msg" - when: result.module_stderr is not defined - - - name: assert with empty parameters - assert: - that: - - 'result.failed' - - "'Member must have length greater than or equal to 20' in result.module_stderr" - when: result.module_stderr is defined - - # ============================================================ - - name: test with only 'role_arn' parameter - sts_assume_role: - role_arn: "{{ test_role.iam_role.arn }}" - register: result - ignore_errors: true - - - name: assert with only 'role_arn' parameter - assert: - that: - - 'result.failed' - - "'missing required arguments: role_session_name' in result.msg" - - # ============================================================ - - name: test with only 'role_session_name' parameter - sts_assume_role: - role_session_name: "AnsibleTest" - register: result - ignore_errors: true - - - name: assert with only 'role_session_name' parameter - assert: - that: - - 'result.failed' - - "'missing required arguments: role_arn' in result.msg" - - # ============================================================ - - name: test assume role with invalid policy - sts_assume_role: - role_arn: "{{ test_role.iam_role.arn }}" - role_session_name: "AnsibleTest" - policy: "invalid policy" - register: result - ignore_errors: true - - - name: assert assume role with invalid 
policy - assert: - that: - - 'result.failed' - - "'The policy is not in the valid JSON format.' in result.msg" - when: result.module_stderr is not defined - - - name: assert assume role with invalid policy - assert: - that: - - 'result.failed' - - "'The policy is not in the valid JSON format.' in result.module_stderr" - when: result.module_stderr is defined - - # ============================================================ - - name: test assume role with invalid duration seconds - sts_assume_role: - role_arn: "{{ test_role.iam_role.arn }}" - role_session_name: AnsibleTest - duration_seconds: invalid duration - register: result - ignore_errors: true - - - name: assert assume role with invalid duration seconds - assert: - that: - - result is failed - - "'duration_seconds' in result.msg" - - "'cannot be converted to an int' in result.msg" - - # ============================================================ - - name: test assume role with invalid external id - sts_assume_role: - role_arn: "{{ test_role.iam_role.arn }}" - role_session_name: AnsibleTest - external_id: invalid external id - register: result - ignore_errors: true - - - name: assert assume role with invalid external id - assert: - that: - - 'result.failed' - - "'Member must satisfy regular expression pattern:' in result.msg" - when: result.module_stderr is not defined - - - name: assert assume role with invalid external id - assert: - that: - - 'result.failed' - - "'Member must satisfy regular expression pattern:' in result.module_stderr" - when: result.module_stderr is defined - - # ============================================================ - - name: test assume role with invalid mfa serial number - sts_assume_role: - role_arn: "{{ test_role.iam_role.arn }}" - role_session_name: AnsibleTest - mfa_serial_number: invalid serial number - register: result - ignore_errors: true - - - name: assert assume role with invalid mfa serial number - assert: - that: - - 'result.failed' - - "'Member must satisfy regular expression pattern:' in result.msg" - when: result.module_stderr is not defined - - - name: assert assume role with invalid mfa serial number - assert: - that: - - 'result.failed' - - "'Member must satisfy regular expression pattern:' in result.module_stderr" - when: result.module_stderr is defined - - # ============================================================ - - name: test assume role with invalid mfa token code - sts_assume_role: - role_arn: "{{ test_role.iam_role.arn }}" - role_session_name: AnsibleTest - mfa_token: invalid token code - register: result - ignore_errors: true - - - name: assert assume role with invalid mfa token code - assert: - that: - - 'result.failed' - - "'Member must satisfy regular expression pattern:' in result.msg" - when: result.module_stderr is not defined - - - name: assert assume role with invalid mfa token code - assert: - that: - - 'result.failed' - - "'Member must satisfy regular expression pattern:' in result.module_stderr" - when: result.module_stderr is defined - - # ============================================================ - - name: test assume role with invalid role_arn - sts_assume_role: - role_arn: invalid role arn - role_session_name: AnsibleTest - register: result - ignore_errors: true - - - name: assert assume role with invalid role_arn - assert: - that: - - result.failed - - "'Invalid length for parameter RoleArn' in result.msg" - when: result.module_stderr is not defined - - - name: assert assume role with invalid role_arn - assert: - that: - - 'result.failed' - - "'Member must 
have length greater than or equal to 20' in result.module_stderr" - when: result.module_stderr is defined - - # ============================================================ - - name: test assume not existing sts role - sts_assume_role: - role_arn: "arn:aws:iam::123456789:role/non-existing-role" - role_session_name: "AnsibleTest" - register: result - ignore_errors: true - - - name: assert assume not existing sts role - assert: - that: - - 'result.failed' - - "'is not authorized to perform: sts:AssumeRole' in result.msg" - when: result.module_stderr is not defined - - - name: assert assume not existing sts role - assert: - that: - - 'result.failed' - - "'is not authorized to perform: sts:AssumeRole' in result.msg" - when: result.module_stderr is defined - - # ============================================================ - - name: test assume role - sts_assume_role: - role_arn: "{{ test_role.iam_role.arn }}" - role_session_name: AnsibleTest - register: assumed_role - - - name: assert assume role - assert: - that: - - 'not assumed_role.failed' - - "'sts_creds' in assumed_role" - - "'access_key' in assumed_role.sts_creds" - - "'secret_key' in assumed_role.sts_creds" - - "'session_token' in assumed_role.sts_creds" - - # ============================================================ - - name: test that assumed credentials have IAM read-only access - iam_role: - aws_access_key: "{{ assumed_role.sts_creds.access_key }}" - aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}" - security_token: "{{ assumed_role.sts_creds.session_token }}" - name: "{{ iam_role_name }}" - assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}" - create_instance_profile: False - state: present - register: result - - - name: assert assumed role with privileged action (expect changed=false) - assert: - that: - - 'not result.failed' - - 'not result.changed' - - "'iam_role' in result" - - # ============================================================ - - name: test assumed role with unprivileged action - iam_role: - aws_access_key: "{{ assumed_role.sts_creds.access_key }}" - aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}" - security_token: "{{ assumed_role.sts_creds.session_token }}" - name: "{{ iam_role_name }}-new" - assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}" - state: present - register: result - ignore_errors: true - - - name: assert assumed role with unprivileged action (expect changed=false) - assert: - that: - - 'result.failed' - - "'is not authorized to perform: iam:CreateRole' in result.msg" - # runs on Python2 - when: result.module_stderr is not defined - - - name: assert assumed role with unprivileged action (expect changed=false) - assert: - that: - - 'result.failed' - - "'is not authorized to perform: iam:CreateRole' in result.module_stderr" - # runs on Python3 - when: result.module_stderr is defined - - # ============================================================ - always: - - - name: delete test iam role - iam_role: - name: "{{ iam_role_name }}" - assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}" - delete_instance_profile: True - managed_policy: - - arn:aws:iam::aws:policy/IAMReadOnlyAccess - state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/sts_session_token/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/sts_session_token/tasks/main.yml index 6231119ec..c814cfd5f 100644 --- 
a/ansible_collections/community/aws/tests/integration/targets/sts_session_token/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/sts_session_token/tasks/main.yml @@ -3,9 +3,9 @@ - module_defaults: group/aws: region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" collections: - amazon.aws block: @@ -54,9 +54,9 @@ - name: Get ARN of user when running with generated token aws_caller_info: - aws_access_key: "{{ token_details.sts_creds.access_key }}" - aws_secret_key: "{{ token_details.sts_creds.secret_key }}" - security_token: "{{ token_details.sts_creds.session_token }}" + access_key: "{{ token_details.sts_creds.access_key }}" + secret_key: "{{ token_details.sts_creds.secret_key }}" + session_token: "{{ token_details.sts_creds.session_token }}" register: token_aws_caller_info - assert: diff --git a/ansible_collections/community/aws/tests/integration/targets/waf_web_acl/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/waf_web_acl/tasks/main.yml index c176e7def..acbf1f29c 100644 --- a/ansible_collections/community/aws/tests/integration/targets/waf_web_acl/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/waf_web_acl/tasks/main.yml @@ -4,9 +4,9 @@ - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + session_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: @@ -15,7 +15,7 @@ ################################################## - name: create WAF IP condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_ip_condition" filters: - ip_address: "10.0.0.0/8" @@ -23,7 +23,7 @@ register: create_waf_ip_condition - name: add an IP address to WAF condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_ip_condition" filters: - ip_address: "10.0.0.0/8" @@ -37,7 +37,7 @@ - add_ip_address_to_waf_condition.condition.ip_set_descriptors|length == 2 - name: add an IP address to WAF condition (rely on purge_filters defaulting to false) - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_ip_condition" filters: - ip_address: "192.168.10.0/24" @@ -51,7 +51,7 @@ - add_ip_address_to_waf_condition_no_purge.changed - name: add an IP address to WAF condition (set purge_filters) - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_ip_condition" filters: - ip_address: "192.168.20.0/24" @@ -66,7 +66,7 @@ - add_ip_address_to_waf_condition_purge.changed - name: create WAF byte condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_byte_condition" filters: - field_to_match: header @@ -77,7 +77,7 @@ register: create_waf_byte_condition - name: recreate WAF byte condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_byte_condition" filters: - field_to_match: header @@ -93,7 +93,7 @@ - not recreate_waf_byte_condition.changed - name: create WAF geo condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_geo_condition" filters: - country: US @@ -103,7 +103,7 @@ register: create_waf_geo_condition - name: create WAF size condition - aws_waf_condition: 
+ waf_condition: name: "{{ resource_prefix }}_size_condition" filters: - field_to_match: query_string @@ -113,7 +113,7 @@ register: create_waf_size_condition - name: create WAF sql condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_sql_condition" filters: - field_to_match: query_string @@ -122,7 +122,7 @@ register: create_waf_sql_condition - name: create WAF xss condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_xss_condition" filters: - field_to_match: query_string @@ -131,7 +131,7 @@ register: create_waf_xss_condition - name: create WAF regex condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition" filters: - field_to_match: query_string @@ -145,7 +145,7 @@ register: create_waf_regex_condition - name: create a second WAF regex condition with the same regex - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition_part_2" filters: - field_to_match: header @@ -169,7 +169,7 @@ - name: delete first WAF regex condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition" filters: - field_to_match: query_string @@ -184,7 +184,7 @@ register: delete_waf_regex_condition - name: delete second WAF regex condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition_part_2" filters: - field_to_match: header @@ -200,7 +200,7 @@ register: delete_second_waf_regex_condition - name: create WAF regex condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition" filters: - field_to_match: query_string @@ -221,7 +221,7 @@ create_waf_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id - name: create WAF Regional IP condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_ip_condition" filters: - ip_address: "10.0.0.0/8" @@ -231,7 +231,7 @@ register: create_waf_regional_ip_condition - name: add an IP address to WAF Regional condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_ip_condition" filters: - ip_address: "10.0.0.0/8" @@ -247,7 +247,7 @@ - add_ip_address_to_waf_regional_condition.condition.ip_set_descriptors|length == 2 - name: add an IP address to WAF Regional condition (rely on purge_filters defaulting to false) - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_ip_condition" filters: - ip_address: "192.168.10.0/24" @@ -263,7 +263,7 @@ - add_ip_address_to_waf_regional_condition_no_purge.changed - name: add an IP address to WAF Regional condition (set purge_filters) - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_ip_condition" filters: - ip_address: "192.168.20.0/24" @@ -280,7 +280,7 @@ - add_ip_address_to_waf_regional_condition_purge.changed - name: create WAF Regional byte condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_byte_condition" filters: - field_to_match: header @@ -293,7 +293,7 @@ register: create_waf_regional_byte_condition - name: recreate WAF Regional byte condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_byte_condition" filters: - field_to_match: header @@ -311,7 +311,7 @@ - not recreate_waf_regional_byte_condition.changed - name: create WAF Regional geo condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_geo_condition" filters: - country: US @@ -323,7 +323,7 @@ register: create_waf_regional_geo_condition - name: create WAF Regional size condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix 
}}_size_condition" filters: - field_to_match: query_string @@ -335,7 +335,7 @@ register: create_waf_regional_size_condition - name: create WAF Regional sql condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_sql_condition" filters: - field_to_match: query_string @@ -346,7 +346,7 @@ register: create_waf_regional_sql_condition - name: create WAF Regional xss condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_xss_condition" filters: - field_to_match: query_string @@ -357,7 +357,7 @@ register: create_waf_regional_xss_condition - name: create WAF Regional regex condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition" filters: - field_to_match: query_string @@ -373,7 +373,7 @@ register: create_waf_regional_regex_condition - name: create a second WAF Regional regex condition with the same regex - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition_part_2" filters: - field_to_match: header @@ -399,7 +399,7 @@ - name: delete first WAF Regional regex condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition" filters: - field_to_match: query_string @@ -416,7 +416,7 @@ register: delete_waf_regional_regex_condition - name: delete second WAF Regional regex condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition_part_2" filters: - field_to_match: header @@ -434,7 +434,7 @@ register: delete_second_waf_regional_regex_condition - name: create WAF Regional regex condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition" filters: - field_to_match: query_string @@ -461,7 +461,7 @@ ################################################## - name: create WAF rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" conditions: - name: "{{ resource_prefix }}_regex_condition" @@ -483,7 +483,7 @@ - create_aws_waf_rule.rule.predicates|length == 3 - name: recreate WAF rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" conditions: - name: "{{ resource_prefix }}_regex_condition" @@ -504,7 +504,7 @@ - create_aws_waf_rule.rule.predicates|length == 3 - name: add further WAF rules relying on purge_conditions defaulting to false - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" conditions: - name: "{{ resource_prefix }}_ip_condition" @@ -525,7 +525,7 @@ - add_conditions_to_aws_waf_rule.rule.predicates|length == 6 - name: remove some rules through purging conditions - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" conditions: - name: "{{ resource_prefix }}_ip_condition" @@ -550,7 +550,7 @@ - add_and_remove_waf_rule_conditions.rule.predicates|length == 4 - name: attempt to remove an in use condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_size_condition" type: size state: absent @@ -561,10 +561,10 @@ assert: that: - remove_in_use_condition.failed - - "'Condition {{ resource_prefix }}_size_condition is in use' in remove_in_use_condition.msg" + - "'Condition ' ~ resource_prefix ~ '_size_condition is in use' in remove_in_use_condition.msg" - name: create WAF Regional rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" conditions: - name: "{{ resource_prefix }}_regex_condition" @@ -588,7 +588,7 @@ - create_aws_waf_regional_rule.rule.predicates|length == 3 - name: recreate WAF Regional rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" conditions: - name: "{{ resource_prefix }}_regex_condition" @@ -611,7 
+611,7 @@ - create_aws_waf_regional_rule.rule.predicates|length == 3 - name: add further WAF Regional rules relying on purge_conditions defaulting to false - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" conditions: - name: "{{ resource_prefix }}_ip_condition" @@ -634,7 +634,7 @@ - add_conditions_to_aws_waf_regional_rule.rule.predicates|length == 6 - name: remove some rules through purging conditions - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" conditions: - name: "{{ resource_prefix }}_ip_condition" @@ -661,7 +661,7 @@ - add_and_remove_waf_regional_rule_conditions.rule.predicates|length == 4 - name: attempt to remove an WAF Regional in use condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_size_condition" type: size state: absent @@ -674,14 +674,14 @@ assert: that: - remove_in_use_condition.failed - - "'Condition {{ resource_prefix }}_size_condition is in use' in remove_in_use_condition.msg" + - "'Condition ' ~ resource_prefix ~ '_size_condition is in use' in remove_in_use_condition.msg" ################################################## # aws_waf_web_acl tests ################################################## - name: create web ACL - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" rules: - name: "{{ resource_prefix }}_rule" @@ -693,7 +693,7 @@ register: create_web_acl - name: recreate web acl - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" rules: - name: "{{ resource_prefix }}_rule" @@ -710,7 +710,7 @@ - recreate_web_acl.web_acl.rules|length == 1 - name: create a second WAF rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule_2" conditions: - name: "{{ resource_prefix }}_ip_condition" @@ -724,7 +724,7 @@ negated: no - name: add a new rule to the web acl - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" rules: - name: "{{ resource_prefix }}_rule_2" @@ -741,7 +741,7 @@ - web_acl_add_rule.web_acl.rules|length == 2 - name: use purge rules to remove the first rule - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" rules: - name: "{{ resource_prefix }}_rule_2" @@ -759,7 +759,7 @@ - web_acl_add_rule.web_acl.rules|length == 1 - name: swap two rules of same priority - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" rules: - name: "{{ resource_prefix }}_rule" @@ -771,7 +771,7 @@ register: web_acl_swap_rule - name: attempt to delete the inuse first rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" state: absent ignore_errors: yes @@ -783,7 +783,7 @@ - remove_inuse_rule.failed - name: delete the web acl - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" state: absent register: delete_web_acl @@ -795,12 +795,12 @@ - not delete_web_acl.web_acl - name: delete the no longer in use first rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" state: absent - name: create WAF Regional web ACL - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" rules: - name: "{{ resource_prefix }}_rule" @@ -814,7 +814,7 @@ register: create_waf_regional_web_acl - name: recreate WAF Regional web acl - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" rules: - name: "{{ resource_prefix }}_rule" @@ -833,7 +833,7 @@ - recreate_waf_regional_web_acl.web_acl.rules|length == 1 - name: create a second WAF Regional rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule_2" conditions: - name: "{{ resource_prefix }}_ip_condition" @@ -849,7 +849,7 @@ 
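
Note on the two assertion fixes in the hunks above: entries under `that:` are already evaluated as Jinja2 expressions, so wrapping `{{ resource_prefix }}` inside them nests a template within a template, which newer ansible-core releases warn about and may refuse to evaluate. The rewrite builds the expected substring with the Jinja2 concatenation operator `~` instead. The pattern, lifted directly from the change above:

  - assert:
      that:
        # already in expression context, so no "{{ }}" moustaches:
        - "'Condition ' ~ resource_prefix ~ '_size_condition is in use' in remove_in_use_condition.msg"
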
waf_regional: true - name: add a new rule to the WAF Regional web acl - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" rules: - name: "{{ resource_prefix }}_rule_2" @@ -868,7 +868,7 @@ - waf_regional_web_acl_add_rule.web_acl.rules|length == 2 - name: use purge rules to remove the WAF Regional first rule - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" rules: - name: "{{ resource_prefix }}_rule_2" @@ -888,7 +888,7 @@ - waf_regional_web_acl_add_rule.web_acl.rules|length == 1 - name: swap two WAF Regional rules of same priority - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" rules: - name: "{{ resource_prefix }}_rule" @@ -902,7 +902,7 @@ register: waf_regional_web_acl_swap_rule - name: attempt to delete the WAF Regional inuse first rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" state: absent region: "{{ aws_region }}" @@ -916,7 +916,7 @@ - remove_waf_regional_inuse_rule.failed - name: delete the WAF Regional web acl - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" state: absent region: "{{ aws_region }}" @@ -930,7 +930,7 @@ - not delete_waf_regional_web_acl.web_acl - name: delete the no longer in use WAF Regional first rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" state: absent region: "{{ aws_region }}" @@ -945,84 +945,84 @@ msg: "****** TEARDOWN STARTS HERE ******" - name: delete the web acl - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" state: absent purge_rules: yes ignore_errors: yes - name: remove second WAF rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule_2" state: absent purge_conditions: yes ignore_errors: yes - name: remove WAF rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule" state: absent purge_conditions: yes ignore_errors: yes - name: remove XSS condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_xss_condition" type: xss state: absent ignore_errors: yes - name: remove SQL condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_sql_condition" type: sql state: absent ignore_errors: yes - name: remove size condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_size_condition" type: size state: absent ignore_errors: yes - name: remove geo condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_geo_condition" type: geo state: absent ignore_errors: yes - name: remove byte condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_byte_condition" type: byte state: absent ignore_errors: yes - name: remove ip address condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_ip_condition" type: ip state: absent ignore_errors: yes - name: remove regex part 2 condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition_part_2" type: regex state: absent ignore_errors: yes - name: remove first regex condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition" type: regex state: absent ignore_errors: yes - name: delete the WAF Regional web acl - aws_waf_web_acl: + waf_web_acl: name: "{{ resource_prefix }}_web_acl" state: absent purge_rules: yes @@ -1031,7 +1031,7 @@ ignore_errors: yes - name: remove second WAF Regional rule - aws_waf_rule: + waf_rule: name: "{{ resource_prefix }}_rule_2" state: absent purge_conditions: yes @@ -1040,7 +1040,7 @@ ignore_errors: yes - name: remove WAF Regional rule - aws_waf_rule: + 
waf_rule: name: "{{ resource_prefix }}_rule" state: absent purge_conditions: yes @@ -1049,7 +1049,7 @@ ignore_errors: yes - name: remove WAF Regional XSS condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_xss_condition" type: xss state: absent @@ -1058,7 +1058,7 @@ ignore_errors: yes - name: remove WAF Regional SQL condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_sql_condition" type: sql state: absent @@ -1067,7 +1067,7 @@ ignore_errors: yes - name: remove WAF Regional size condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_size_condition" type: size state: absent @@ -1076,7 +1076,7 @@ ignore_errors: yes - name: remove WAF Regional geo condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_geo_condition" type: geo state: absent @@ -1085,7 +1085,7 @@ ignore_errors: yes - name: remove WAF Regional byte condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_byte_condition" type: byte state: absent @@ -1094,7 +1094,7 @@ ignore_errors: yes - name: remove WAF Regional ip address condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_ip_condition" type: ip state: absent @@ -1103,7 +1103,7 @@ ignore_errors: yes - name: remove WAF Regional regex part 2 condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition_part_2" type: regex state: absent @@ -1112,7 +1112,7 @@ ignore_errors: yes - name: remove first WAF Regional regex condition - aws_waf_condition: + waf_condition: name: "{{ resource_prefix }}_regex_condition" type: regex state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/alb.yml b/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/alb.yml index 32aeb376a..c56ad6d46 100644 --- a/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/alb.yml +++ b/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/alb.yml @@ -63,7 +63,7 @@ gateway_id: '{{ igw.gateway_id }}' register: route_table -- ec2_group: +- ec2_security_group: name: '{{ resource_prefix }}' description: security group for Ansible ALB integration tests state: present diff --git a/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/main.yml index 547c4c151..a536cf405 100644 --- a/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/main.yml @@ -1,9 +1,9 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: @@ -103,10 +103,6 @@ ######################### - name: destroy ALB elb_application_lb: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - region: '{{ aws_region }}' name: '{{ alb_name }}' state: absent wait: true @@ -115,10 +111,6 @@ - name: destroy target group if it was created elb_target_group: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - region: '{{ aws_region }}' name: '{{ tg_name }}' protocol: http port: 80 @@ 
-134,11 +126,7 @@ ignore_errors: true - name: destroy sec group - ec2_group: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - region: '{{ aws_region }}' + ec2_security_group: name: '{{ sec_group.group_name }}' description: security group for Ansible ALB integration tests state: absent @@ -151,10 +139,6 @@ - name: remove route table ec2_vpc_route_table: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - region: '{{ aws_region }}' vpc_id: '{{ vpc.vpc.id }}' route_table_id: '{{ route_table.route_table.route_table_id }}' lookup: id @@ -167,10 +151,6 @@ - name: destroy subnets ec2_vpc_subnet: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - region: '{{ aws_region }}' cidr: '{{ item.cidr }}' vpc_id: '{{ vpc.vpc.id }}' state: absent @@ -187,10 +167,6 @@ - name: destroy internet gateway ec2_vpc_igw: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - region: '{{ aws_region }}' vpc_id: '{{ vpc.vpc.id }}' tags: Name: '{{ resource_prefix }}' @@ -203,10 +179,6 @@ - name: destroy VPC ec2_vpc_net: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - region: '{{ aws_region }}' cidr_block: 10.228.228.0/22 name: '{{ resource_prefix }}_vpc' state: absent diff --git a/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/rule_group.yml b/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/rule_group.yml index 6ec46f5dd..7648504be 100644 --- a/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/rule_group.yml +++ b/ansible_collections/community/aws/tests/integration/targets/wafv2/tasks/rule_group.yml @@ -79,7 +79,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out @@ -554,7 +553,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out @@ -671,7 +669,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out diff --git a/ansible_collections/community/aws/tests/integration/targets/wafv2_ip_set/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/wafv2_ip_set/tasks/main.yml index f7afc5b93..6fcf4438c 100644 --- a/ansible_collections/community/aws/tests/integration/targets/wafv2_ip_set/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/wafv2_ip_set/tasks/main.yml @@ -1,9 +1,9 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - name: check_mode create ip set diff --git a/ansible_collections/community/aws/tests/integration/targets/wafv2_rule_group/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/wafv2_rule_group/tasks/main.yml index 630d5de29..b2a2fcd8c 100644 --- a/ansible_collections/community/aws/tests/integration/targets/wafv2_rule_group/tasks/main.yml +++ 
b/ansible_collections/community/aws/tests/integration/targets/wafv2_rule_group/tasks/main.yml @@ -1,9 +1,9 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: #################################### @@ -87,7 +87,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out @@ -562,7 +561,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out @@ -679,7 +677,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out diff --git a/ansible_collections/community/aws/tests/integration/targets/wafv2_web_acl/tasks/main.yml b/ansible_collections/community/aws/tests/integration/targets/wafv2_web_acl/tasks/main.yml index 9d44e2b77..64544fd50 100644 --- a/ansible_collections/community/aws/tests/integration/targets/wafv2_web_acl/tasks/main.yml +++ b/ansible_collections/community/aws/tests/integration/targets/wafv2_web_acl/tasks/main.yml @@ -1,9 +1,9 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: diff --git a/ansible_collections/community/aws/tests/requirements.yml b/ansible_collections/community/aws/tests/requirements.yml deleted file mode 100644 index 98b77a444..000000000 --- a/ansible_collections/community/aws/tests/requirements.yml +++ /dev/null @@ -1,7 +0,0 @@ -integration_tests_dependencies: -- amazon.aws >= 3.0.0 -- ansible.windows -- community.crypto -- community.general -unit_tests_dependencies: -- amazon.aws >= 3.0.0 diff --git a/ansible_collections/community/aws/tests/sanity/ignore-2.11.txt b/ansible_collections/community/aws/tests/sanity/ignore-2.11.txt index 1c8bcbbeb..e69de29bb 100644 --- a/ansible_collections/community/aws/tests/sanity/ignore-2.11.txt +++ b/ansible_collections/community/aws/tests/sanity/ignore-2.11.txt @@ -1 +0,0 @@ -plugins/modules/cloudfront_distribution_info.py pylint:unnecessary-comprehension # (new test) Should be an easy fix, but testing is a challenge - test are broken and aliases require a wildcard cert in ACM diff --git a/ansible_collections/community/aws/tests/sanity/ignore-2.12.txt b/ansible_collections/community/aws/tests/sanity/ignore-2.12.txt index 1c8bcbbeb..e69de29bb 100644 --- a/ansible_collections/community/aws/tests/sanity/ignore-2.12.txt +++ b/ansible_collections/community/aws/tests/sanity/ignore-2.12.txt @@ -1 +0,0 @@ -plugins/modules/cloudfront_distribution_info.py pylint:unnecessary-comprehension # (new test) Should be an easy fix, but testing is a challenge - test are broken and aliases require a wildcard cert in ACM diff --git a/ansible_collections/community/aws/tests/sanity/ignore-2.13.txt b/ansible_collections/community/aws/tests/sanity/ignore-2.13.txt index 1c8bcbbeb..e69de29bb 100644 --- a/ansible_collections/community/aws/tests/sanity/ignore-2.13.txt +++ b/ansible_collections/community/aws/tests/sanity/ignore-2.13.txt @@ -1 +0,0 @@ 
-plugins/modules/cloudfront_distribution_info.py pylint:unnecessary-comprehension # (new test) Should be an easy fix, but testing is a challenge - test are broken and aliases require a wildcard cert in ACM
diff --git a/ansible_collections/community/aws/tests/sanity/ignore-2.14.txt b/ansible_collections/community/aws/tests/sanity/ignore-2.14.txt
index 1c8bcbbeb..67d3693df 100644
--- a/ansible_collections/community/aws/tests/sanity/ignore-2.14.txt
+++ b/ansible_collections/community/aws/tests/sanity/ignore-2.14.txt
@@ -1 +1,2 @@
-plugins/modules/cloudfront_distribution_info.py pylint:unnecessary-comprehension # (new test) Should be an easy fix, but testing is a challenge - test are broken and aliases require a wildcard cert in ACM
+plugins/connection/aws_ssm.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
+plugins/inventory/aws_mq.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
diff --git a/ansible_collections/community/aws/tests/sanity/ignore-2.15.txt b/ansible_collections/community/aws/tests/sanity/ignore-2.15.txt
index 1c8bcbbeb..67d3693df 100644
--- a/ansible_collections/community/aws/tests/sanity/ignore-2.15.txt
+++ b/ansible_collections/community/aws/tests/sanity/ignore-2.15.txt
@@ -1 +1,2 @@
-plugins/modules/cloudfront_distribution_info.py pylint:unnecessary-comprehension # (new test) Should be an easy fix, but testing is a challenge - test are broken and aliases require a wildcard cert in ACM
+plugins/connection/aws_ssm.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
+plugins/inventory/aws_mq.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
diff --git a/ansible_collections/community/aws/tests/sanity/ignore-2.16.txt b/ansible_collections/community/aws/tests/sanity/ignore-2.16.txt
new file mode 100644
index 000000000..67d3693df
--- /dev/null
+++ b/ansible_collections/community/aws/tests/sanity/ignore-2.16.txt
@@ -0,0 +1,2 @@
+plugins/connection/aws_ssm.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
+plugins/inventory/aws_mq.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
diff --git a/ansible_collections/community/aws/tests/sanity/ignore-2.17.txt b/ansible_collections/community/aws/tests/sanity/ignore-2.17.txt
new file mode 100644
index 000000000..67d3693df
--- /dev/null
+++ b/ansible_collections/community/aws/tests/sanity/ignore-2.17.txt
@@ -0,0 +1,2 @@
+plugins/connection/aws_ssm.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
+plugins/inventory/aws_mq.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
diff --git a/ansible_collections/community/aws/tests/sanity/ignore-2.9.txt b/ansible_collections/community/aws/tests/sanity/ignore-2.9.txt
index 5ae2cc9cc..e69de29bb 100644
--- a/ansible_collections/community/aws/tests/sanity/ignore-2.9.txt
+++ b/ansible_collections/community/aws/tests/sanity/ignore-2.9.txt
@@ -1 +0,0 @@
-plugins/modules/iam_role.py pylint:ansible-deprecated-no-version
diff --git a/ansible_collections/community/aws/tests/sanity/requirements.yml b/ansible_collections/community/aws/tests/sanity/requirements.yml
new file mode 100644
index 000000000..99ce82a1b
--- /dev/null
+++ b/ansible_collections/community/aws/tests/sanity/requirements.yml
@@ -0,0 +1,5 @@
+---
+collections:
+  - name: https://github.com/ansible-collections/amazon.aws.git
+    type: git
+    version: main
diff --git a/ansible_collections/community/aws/tests/unit/compat/builtins.py b/ansible_collections/community/aws/tests/unit/compat/builtins.py
index 349d310e8..3df85be4f 100644
--- a/ansible_collections/community/aws/tests/unit/compat/builtins.py
+++ b/ansible_collections/community/aws/tests/unit/compat/builtins.py
@@ -16,7 +16,10 @@
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.

 # Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 __metaclass__ = type

 #
@@ -28,6 +31,6 @@ __metaclass__ = type
 try:
     import __builtin__  # pylint: disable=unused-import
 except ImportError:
-    BUILTINS = 'builtins'
+    BUILTINS = "builtins"
 else:
-    BUILTINS = '__builtin__'
+    BUILTINS = "__builtin__"
diff --git a/ansible_collections/community/aws/tests/unit/compat/mock.py b/ansible_collections/community/aws/tests/unit/compat/mock.py
deleted file mode 100644
index 0972cd2e8..000000000
--- a/ansible_collections/community/aws/tests/unit/compat/mock.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-'''
-Compat module for Python3.x's unittest.mock module
-'''
-import sys
-
-# Python 2.7
-
-# Note: Could use the pypi mock library on python3.x as well as python2.x. It
-# is the same as the python3 stdlib mock library
-
-try:
-    # Allow wildcard import because we really do want to import all of mock's
-    # symbols into this compat shim
-    # pylint: disable=wildcard-import,unused-wildcard-import
-    from unittest.mock import *
-except ImportError:
-    # Python 2
-    # pylint: disable=wildcard-import,unused-wildcard-import
-    try:
-        from mock import *
-    except ImportError:
-        print('You need the mock library installed on python2.x to run tests')
-
-
-# Prior to 3.4.4, mock_open cannot handle binary read_data
-if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
-    file_spec = None
-
-    def _iterate_read_data(read_data):
-        # Helper for mock_open:
-        # Retrieve lines from read_data via a generator so that separate calls to
-        # readline, read, and readlines are properly interleaved
-        sep = b'\n' if isinstance(read_data, bytes) else '\n'
-        data_as_list = [l + sep for l in read_data.split(sep)]
-
-        if data_as_list[-1] == sep:
-            # If the last line ended in a newline, the list comprehension will have an
-            # extra entry that's just a newline. Remove this.
- data_as_list = data_as_list[:-1] - else: - # If there wasn't an extra newline by itself, then the file being - # emulated doesn't have a newline to end the last line remove the - # newline that our naive format() added - data_as_list[-1] = data_as_list[-1][:-1] - - for line in data_as_list: - yield line - - def mock_open(mock=None, read_data=''): - """ - A helper function to create a mock to replace the use of `open`. It works - for `open` called directly or used as a context manager. - - The `mock` argument is the mock object to configure. If `None` (the - default) then a `MagicMock` will be created for you, with the API limited - to methods or attributes available on standard file handles. - - `read_data` is a string for the `read` methoddline`, and `readlines` of the - file handle to return. This is an empty string by default. - """ - def _readlines_side_effect(*args, **kwargs): - if handle.readlines.return_value is not None: - return handle.readlines.return_value - return list(_data) - - def _read_side_effect(*args, **kwargs): - if handle.read.return_value is not None: - return handle.read.return_value - return type(read_data)().join(_data) - - def _readline_side_effect(): - if handle.readline.return_value is not None: - while True: - yield handle.readline.return_value - for line in _data: - yield line - - global file_spec - if file_spec is None: - import _io - file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) - - if mock is None: - mock = MagicMock(name='open', spec=open) - - handle = MagicMock(spec=file_spec) - handle.__enter__.return_value = handle - - _data = _iterate_read_data(read_data) - - handle.write.return_value = None - handle.read.return_value = None - handle.readline.return_value = None - handle.readlines.return_value = None - - handle.read.side_effect = _read_side_effect - handle.readline.side_effect = _readline_side_effect() - handle.readlines.side_effect = _readlines_side_effect - - mock.return_value = handle - return mock diff --git a/ansible_collections/community/aws/tests/unit/compat/unittest.py b/ansible_collections/community/aws/tests/unit/compat/unittest.py deleted file mode 100644 index 98f08ad6a..000000000 --- a/ansible_collections/community/aws/tests/unit/compat/unittest.py +++ /dev/null @@ -1,38 +0,0 @@ -# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com> -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
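
Both compat shims deleted in these hunks predate the collection's drop of Python 2 support: compat/mock.py wildcard-imported `unittest.mock` with a fallback to the PyPI `mock` package and carried a `mock_open` backport for binary `read_data` on Python < 3.4.4, while compat/unittest.py papered over `unittest2` on Python 2.6. The refreshed tests import the standard library directly, as the rewritten mock/path.py and test_aws_ssm.py later in this diff show:

  from unittest.mock import MagicMock
  from unittest.mock import patch
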
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Compat module for Python2.7's unittest module -''' - -import sys - -# Allow wildcard import because we really do want to import all of -# unittests's symbols into this compat shim -# pylint: disable=wildcard-import,unused-wildcard-import -if sys.version_info < (2, 7): - try: - # Need unittest2 on python2.6 - from unittest2 import * - except ImportError: - print('You need unittest2 installed on python2.6.x to run tests') -else: - from unittest import * diff --git a/ansible_collections/community/aws/tests/unit/constraints.txt b/ansible_collections/community/aws/tests/unit/constraints.txt index cd546e7c2..5708323f1 100644 --- a/ansible_collections/community/aws/tests/unit/constraints.txt +++ b/ansible_collections/community/aws/tests/unit/constraints.txt @@ -1,7 +1,7 @@ # Specifically run tests against the oldest versions that we support -boto3==1.18.0 -botocore==1.21.0 +botocore==1.29.0 +boto3==1.26.0 # AWS CLI has `botocore==` dependencies, provide the one that matches botocore # to avoid needing to download over a years worth of awscli wheels. -awscli==1.20.0 +awscli==1.27.0 diff --git a/ansible_collections/community/aws/tests/unit/mock/loader.py b/ansible_collections/community/aws/tests/unit/mock/loader.py index 00a584127..339a1918c 100644 --- a/ansible_collections/community/aws/tests/unit/mock/loader.py +++ b/ansible_collections/community/aws/tests/unit/mock/loader.py @@ -16,21 +16,24 @@ # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type import os from ansible.errors import AnsibleParserError +from ansible.module_utils._text import to_bytes +from ansible.module_utils._text import to_text from ansible.parsing.dataloader import DataLoader -from ansible.module_utils._text import to_bytes, to_text class DictDataLoader(DataLoader): - def __init__(self, file_mapping=None): file_mapping = {} if file_mapping is None else file_mapping - assert type(file_mapping) == dict + assert isinstance(file_mapping, dict) super(DictDataLoader, self).__init__() @@ -51,7 +54,7 @@ class DictDataLoader(DataLoader): if file_name in self._file_mapping: return (to_bytes(self._file_mapping[file_name]), False) else: - raise AnsibleParserError("file not found: %s" % file_name) + raise AnsibleParserError(f"file not found: {file_name}") def path_exists(self, path): path = to_text(path) @@ -68,7 +71,7 @@ class DictDataLoader(DataLoader): def list_directory(self, path): ret = [] path = to_text(path) - for x in (list(self._file_mapping.keys()) + self._known_directories): + for x in list(self._file_mapping.keys()) + self._known_directories: if x.startswith(path): if os.path.dirname(x) == path: ret.append(os.path.basename(x)) @@ -86,7 +89,7 @@ class DictDataLoader(DataLoader): self._known_directories = [] for path in self._file_mapping: dirname = os.path.dirname(path) - while dirname not in ('/', ''): + while dirname not in ("/", ""): self._add_known_directory(dirname) dirname = os.path.dirname(dirname) diff --git a/ansible_collections/community/aws/tests/unit/mock/path.py b/ansible_collections/community/aws/tests/unit/mock/path.py index 676b35ab8..8057e5a58 100644 --- a/ansible_collections/community/aws/tests/unit/mock/path.py +++ 
b/ansible_collections/community/aws/tests/unit/mock/path.py @@ -1,10 +1,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from unittest.mock import MagicMock -from ansible_collections.community.aws.tests.unit.compat.mock import MagicMock from ansible.utils.path import unfrackpath - mock_unfrackpath_noop = MagicMock(spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x) diff --git a/ansible_collections/community/aws/tests/unit/mock/procenv.py b/ansible_collections/community/aws/tests/unit/mock/procenv.py index e516a9458..0d8547f50 100644 --- a/ansible_collections/community/aws/tests/unit/mock/procenv.py +++ b/ansible_collections/community/aws/tests/unit/mock/procenv.py @@ -16,22 +16,19 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys import json - +import sys +import unittest from contextlib import contextmanager -from io import BytesIO, StringIO -from ansible_collections.community.aws.tests.unit.compat import unittest -from ansible.module_utils.six import PY3 +from io import BytesIO +from io import StringIO + from ansible.module_utils._text import to_bytes +from ansible.module_utils.six import PY3 @contextmanager -def swap_stdin_and_argv(stdin_data='', argv_data=tuple()): +def swap_stdin_and_argv(stdin_data="", argv_data=tuple()): """ context manager that temporarily masks the test runner's values for stdin and argv """ @@ -77,7 +74,7 @@ def swap_stdout(): class ModuleTestCase(unittest.TestCase): def setUp(self, module_args=None): if module_args is None: - module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False} + module_args = {"_ansible_remote_tmp": "/tmp", "_ansible_keep_remote_files": False} args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args)) diff --git a/ansible_collections/community/aws/tests/unit/mock/vault_helper.py b/ansible_collections/community/aws/tests/unit/mock/vault_helper.py index b54629da4..c55228c88 100644 --- a/ansible_collections/community/aws/tests/unit/mock/vault_helper.py +++ b/ansible_collections/community/aws/tests/unit/mock/vault_helper.py @@ -1,27 +1,29 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type from ansible.module_utils._text import to_bytes - from ansible.parsing.vault import VaultSecret class TextVaultSecret(VaultSecret): - '''A secret piece of text. ie, a password. Tracks text encoding. + """A secret piece of text. ie, a password. Tracks text encoding. 
The text encoding of the text may not be the default text encoding so - we keep track of the encoding so we encode it to the same bytes.''' + we keep track of the encoding so we encode it to the same bytes.""" def __init__(self, text, encoding=None, errors=None, _bytes=None): super(TextVaultSecret, self).__init__() self.text = text - self.encoding = encoding or 'utf-8' + self.encoding = encoding or "utf-8" self._bytes = _bytes - self.errors = errors or 'strict' + self.errors = errors or "strict" @property def bytes(self): - '''The text encoded with encoding, unless we specifically set _bytes.''' + """The text encoded with encoding, unless we specifically set _bytes.""" return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors) diff --git a/ansible_collections/community/aws/tests/unit/mock/yaml_helper.py b/ansible_collections/community/aws/tests/unit/mock/yaml_helper.py index a646c0241..8c99ef40f 100644 --- a/ansible_collections/community/aws/tests/unit/mock/yaml_helper.py +++ b/ansible_collections/community/aws/tests/unit/mock/yaml_helper.py @@ -1,18 +1,23 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type import io + import yaml from ansible.module_utils.six import PY3 -from ansible.parsing.yaml.loader import AnsibleLoader from ansible.parsing.yaml.dumper import AnsibleDumper +from ansible.parsing.yaml.loader import AnsibleLoader class YamlTestUtils(object): """Mixin class to combine with a unittest.TestCase subclass.""" + def _loader(self, stream): """Vault related tests will want to override this. 
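
Most of the churn in these unit-test helpers is a mechanical restyle rather than a behaviour change: double-quoted strings, one `from ... import ...` per line, f-strings in place of %-formatting, and reflowed multi-line expressions. The loader.py hunks earlier in this diff are representative of the before/after pattern:

  # before
  from ansible.module_utils._text import to_bytes, to_text
  raise AnsibleParserError("file not found: %s" % file_name)

  # after
  from ansible.module_utils._text import to_bytes
  from ansible.module_utils._text import to_text
  raise AnsibleParserError(f"file not found: {file_name}")
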
@@ -45,8 +50,7 @@ class YamlTestUtils(object): obj_2 = loader.get_data() # dump the gen 2 objects directory to strings - string_from_object_dump_2 = self._dump_string(obj_2, - dumper=AnsibleDumper) + string_from_object_dump_2 = self._dump_string(obj_2, dumper=AnsibleDumper) # The gen 1 and gen 2 yaml strings self.assertEqual(string_from_object_dump, string_from_object_dump_2) @@ -66,7 +70,7 @@ class YamlTestUtils(object): self.assertEqual(string_from_object_dump, string_from_object_dump_3) def _old_dump_load_cycle(self, obj): - '''Dump the passed in object to yaml, load it back up, dump again, compare.''' + """Dump the passed in object to yaml, load it back up, dump again, compare.""" stream = io.StringIO() yaml_string = self._dump_string(obj, dumper=AnsibleDumper) @@ -111,16 +115,23 @@ class YamlTestUtils(object): assert yaml_string == yaml_string_obj_from_stream assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string - assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream == - yaml_string_stream_obj_from_string) + assert ( + yaml_string + == yaml_string_obj_from_stream + == yaml_string_obj_from_string + == yaml_string_stream_obj_from_stream + == yaml_string_stream_obj_from_string + ) assert obj == obj_from_stream assert obj == obj_from_string assert obj == yaml_string_obj_from_stream assert obj == yaml_string_obj_from_string assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string - return {'obj': obj, - 'yaml_string': yaml_string, - 'yaml_string_from_stream': yaml_string_from_stream, - 'obj_from_stream': obj_from_stream, - 'obj_from_string': obj_from_string, - 'yaml_string_obj_from_string': yaml_string_obj_from_string} + return { + "obj": obj, + "yaml_string": yaml_string, + "yaml_string_from_stream": yaml_string_from_stream, + "obj_from_stream": obj_from_stream, + "obj_from_string": obj_from_string, + "yaml_string_obj_from_string": yaml_string_obj_from_string, + } diff --git a/ansible_collections/community/aws/tests/unit/plugins/connection/test_aws_ssm.py b/ansible_collections/community/aws/tests/unit/plugins/connection/test_aws_ssm.py index 579cafc16..d5fcb4b1e 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/connection/test_aws_ssm.py +++ b/ansible_collections/community/aws/tests/unit/plugins/connection/test_aws_ssm.py @@ -1,11 +1,11 @@ -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from io import StringIO +from unittest.mock import MagicMock +from unittest.mock import patch + import pytest -from ansible_collections.community.aws.tests.unit.compat.mock import patch, MagicMock from ansible.playbook.play_context import PlayContext from ansible.plugins.loader import connection_loader @@ -15,46 +15,45 @@ if not HAS_BOTO3: pytestmark = pytest.mark.skip("test_data_pipeline.py requires the python modules 'boto3' and 'botocore'") -class TestConnectionBaseClass(): - - @patch('os.path.exists') - @patch('subprocess.Popen') - @patch('select.poll') - @patch('boto3.client') +class TestConnectionBaseClass: + @patch("os.path.exists") + @patch("subprocess.Popen") + @patch("select.poll") + @patch("boto3.client") def test_plugins_connection_aws_ssm_start_session(self, boto_client, s_poll, s_popen, mock_ospe): pc = PlayContext() new_stdin = StringIO() - conn = 
connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.get_option = MagicMock() - conn.get_option.side_effect = ['i1234', 'executable', 'abcd', 'i1234'] - conn.host = 'abc' + conn.get_option.side_effect = ["i1234", "executable", "abcd", "i1234"] + conn.host = "abc" mock_ospe.return_value = True boto3 = MagicMock() - boto3.client('ssm').return_value = MagicMock() + boto3.client("ssm").return_value = MagicMock() conn.start_session = MagicMock() conn._session_id = MagicMock() - conn._session_id.return_value = 's1' + conn._session_id.return_value = "s1" s_popen.return_value.stdin.write = MagicMock() s_poll.return_value = MagicMock() s_poll.return_value.register = MagicMock() s_popen.return_value.poll = MagicMock() s_popen.return_value.poll.return_value = None conn._stdin_readline = MagicMock() - conn._stdin_readline.return_value = 'abc123' - conn.SESSION_START = 'abc' + conn._stdin_readline.return_value = "abc123" + conn.SESSION_START = "abc" conn.start_session() - @patch('random.choice') + @patch("random.choice") def test_plugins_connection_aws_ssm_exec_command(self, r_choice): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) - r_choice.side_effect = ['a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b'] + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) + r_choice.side_effect = ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"] conn.MARK_LENGTH = 5 conn._session = MagicMock() conn._session.stdin.write = MagicMock() conn._wrap_command = MagicMock() - conn._wrap_command.return_value = 'cmd1' + conn._wrap_command.return_value = "cmd1" conn._flush_stderr = MagicMock() conn._windows = MagicMock() conn._windows.return_value = True @@ -67,44 +66,44 @@ class TestConnectionBaseClass(): conn._session.stdout = MagicMock() conn._session.stdout.readline = MagicMock() conn._post_process = MagicMock() - conn._post_process.return_value = 'test' - conn._session.stdout.readline.side_effect = iter(['aaaaa\n', 'Hi\n', '0\n', 'bbbbb\n']) + conn._post_process.return_value = "test" + conn._session.stdout.readline.side_effect = iter(["aaaaa\n", "Hi\n", "0\n", "bbbbb\n"]) conn.get_option = MagicMock() conn.get_option.return_value = 1 - returncode = 'a' - stdout = 'b' + returncode = "a" + stdout = "b" return (returncode, stdout, conn._flush_stderr) def test_plugins_connection_aws_ssm_prepare_terminal(self): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.is_windows = MagicMock() conn.is_windows.return_value = True def test_plugins_connection_aws_ssm_wrap_command(self): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.is_windows = MagicMock() conn.is_windows.return_value = True - return 'windows1' + return "windows1" def test_plugins_connection_aws_ssm_post_process(self): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.is_windows = MagicMock() conn.is_windows.return_value = True conn.stdout = MagicMock() returncode = 0 return returncode, conn.stdout - @patch('subprocess.Popen') + @patch("subprocess.Popen") def 
test_plugins_connection_aws_ssm_flush_stderr(self, s_popen): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.poll_stderr = MagicMock() conn.poll_stderr.register = MagicMock() conn.stderr = None @@ -121,37 +120,37 @@ class TestConnectionBaseClass(): # boto3.generate_presigned_url.return_value = MagicMock() # return (boto3.generate_presigned_url.return_value) - @patch('os.path.exists') + @patch("os.path.exists") def test_plugins_connection_aws_ssm_put_file(self, mock_ospe): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn._connect = MagicMock() conn._file_transport_command = MagicMock() - conn._file_transport_command.return_value = (0, 'stdout', 'stderr') - conn.put_file('/in/file', '/out/file') + conn._file_transport_command.return_value = (0, "stdout", "stderr") + conn.put_file("/in/file", "/out/file") def test_plugins_connection_aws_ssm_fetch_file(self): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn._connect = MagicMock() conn._file_transport_command = MagicMock() - conn._file_transport_command.return_value = (0, 'stdout', 'stderr') - conn.fetch_file('/in/file', '/out/file') + conn._file_transport_command.return_value = (0, "stdout", "stderr") + conn.fetch_file("/in/file", "/out/file") - @patch('subprocess.check_output') - @patch('boto3.client') + @patch("subprocess.check_output") + @patch("boto3.client") def test_plugins_connection_file_transport_command(self, boto_client, s_check_output): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.get_option = MagicMock() - conn.get_option.side_effect = ['1', '2', '3', '4', '5'] + conn.get_option.side_effect = ["1", "2", "3", "4", "5"] conn._get_url = MagicMock() - conn._get_url.side_effect = ['url1', 'url2'] + conn._get_url.side_effect = ["url1", "url2"] boto3 = MagicMock() - boto3.client('s3').return_value = MagicMock() + boto3.client("s3").return_value = MagicMock() conn.get_option.return_value = 1 get_command = MagicMock() put_command = MagicMock() @@ -161,11 +160,11 @@ class TestConnectionBaseClass(): conn.exec_command(put_command, in_data=None, sudoable=False) conn.exec_command(get_command, in_data=None, sudoable=False) - @patch('subprocess.check_output') + @patch("subprocess.check_output") def test_plugins_connection_aws_ssm_close(self, s_check_output): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.instance_id = "i-12345" conn._session_id = True conn.get_option = MagicMock() @@ -174,8 +173,8 @@ class TestConnectionBaseClass(): conn._session.terminate = MagicMock() conn._session.communicate = MagicMock() conn._terminate_session = MagicMock() - conn._terminate_session.return_value = '' + conn._terminate_session.return_value = "" conn._session_id = MagicMock() - conn._session_id.return_value = 'a' + conn._session_id.return_value = "a" conn._client = MagicMock() conn.close() diff --git 
a/ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/fixtures/__init__.py b/ansible_collections/community/aws/tests/unit/plugins/inventory/__init__.py index e69de29bb..e69de29bb 100644 --- a/ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/fixtures/__init__.py +++ b/ansible_collections/community/aws/tests/unit/plugins/inventory/__init__.py diff --git a/ansible_collections/community/aws/tests/unit/plugins/inventory/test_aws_mq.py b/ansible_collections/community/aws/tests/unit/plugins/inventory/test_aws_mq.py new file mode 100644 index 000000000..8969b4a03 --- /dev/null +++ b/ansible_collections/community/aws/tests/unit/plugins/inventory/test_aws_mq.py @@ -0,0 +1,638 @@ +# -*- coding: utf-8 -*- + +# Copyright 2023 Ali AlKhalidi <@doteast> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +import copy +import random +import string +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import patch + +import pytest + +try: + import botocore +except ImportError: + # Handled by HAS_BOTO3 + pass + +from ansible.errors import AnsibleError + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 + +from ansible_collections.community.aws.plugins.inventory.aws_mq import InventoryModule +from ansible_collections.community.aws.plugins.inventory.aws_mq import _add_details_to_hosts +from ansible_collections.community.aws.plugins.inventory.aws_mq import _find_hosts_matching_statuses +from ansible_collections.community.aws.plugins.inventory.aws_mq import _get_broker_host_tags + +if not HAS_BOTO3: + pytestmark = pytest.mark.skip("test_aws_mq.py requires the python modules 'boto3' and 'botocore'") + + +def make_clienterror_exception(code="AccessDenied"): + return botocore.exceptions.ClientError( + { + "Error": {"Code": code, "Message": "User is not authorized to perform: xxx on resource: user yyyy"}, + "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, + }, + "getXXX", + ) + + +@pytest.fixture() +def inventory(): + inventory = InventoryModule() + inventory.inventory = MagicMock() + inventory.inventory.set_variable = MagicMock() + + inventory.all_clients = MagicMock() + inventory.get_option = MagicMock() + + inventory._populate_host_vars = MagicMock() + inventory._set_composite_vars = MagicMock() + inventory._add_host_to_composed_groups = MagicMock() + inventory._add_host_to_keyed_groups = MagicMock() + + inventory.get_cache_key = MagicMock() + + inventory._cache = {} + + return inventory + + +@pytest.fixture() +def connection(): + conn = MagicMock() + return conn + + +@pytest.mark.parametrize( + "suffix,result", + [ + ("aws_mq.yml", True), + ("aws_mq.yaml", True), + ("aws_MQ.yml", False), + ("AWS_mq.yaml", False), + ], +) +def test_inventory_verify_file_suffix(inventory, suffix, result, tmp_path): + test_dir = tmp_path / "test_aws_mq" + test_dir.mkdir() + inventory_file = 
"inventory" + suffix + inventory_file = test_dir / inventory_file + inventory_file.write_text("my inventory") + assert result == inventory.verify_file(str(inventory_file)) + + +def test_inventory_verify_file_with_missing_file(inventory): + inventory_file = "this_file_does_not_exist_aws_mq.yml" + assert not inventory.verify_file(inventory_file) + + +def generate_random_string(with_digits=True, with_punctuation=True, length=16): + data = string.ascii_letters + if with_digits: + data += string.digits + if with_punctuation: + data += string.punctuation + return "".join([random.choice(data) for i in range(length)]) + + +@pytest.mark.parametrize( + "hosts,statuses,expected", + [ + ( + [ + {"host": "host1", "BrokerState": "DELETION_IN_PROGRESS"}, + {"host": "host2", "BrokerState": "RUNNING"}, + {"host": "host3", "BrokerState": "REBOOT_IN_PROGRESS"}, + {"host": "host4", "BrokerState": "CRITICAL_ACTION_REQUIRED"}, + {"host": "host5", "BrokerState": "CREATION_FAILED"}, + {"host": "host6", "BrokerState": "CREATION_IN_PROGRESS"}, + ], + ["RUNNING"], + [{"host": "host2", "BrokerState": "RUNNING"}], + ), + ( + [ + {"host": "host1", "BrokerState": "DELETION_IN_PROGRESS"}, + {"host": "host2", "BrokerState": "RUNNING"}, + {"host": "host3", "BrokerState": "REBOOT_IN_PROGRESS"}, + {"host": "host4", "BrokerState": "CRITICAL_ACTION_REQUIRED"}, + {"host": "host5", "BrokerState": "CREATION_FAILED"}, + {"host": "host6", "BrokerState": "CREATION_IN_PROGRESS"}, + ], + ["all"], + [ + {"host": "host1", "BrokerState": "DELETION_IN_PROGRESS"}, + {"host": "host2", "BrokerState": "RUNNING"}, + {"host": "host3", "BrokerState": "REBOOT_IN_PROGRESS"}, + {"host": "host4", "BrokerState": "CRITICAL_ACTION_REQUIRED"}, + {"host": "host5", "BrokerState": "CREATION_FAILED"}, + {"host": "host6", "BrokerState": "CREATION_IN_PROGRESS"}, + ], + ), + ( + [ + {"host": "host1", "BrokerState": "DELETION_IN_PROGRESS"}, + {"host": "host2", "BrokerState": "RUNNING"}, + {"host": "host3", "BrokerState": "CREATION_FAILED"}, + {"host": "host4", "BrokerState": "CRITICAL_ACTION_REQUIRED"}, + {"host": "host5", "BrokerState": "RUNNING"}, + {"host": "host6", "BrokerState": "CREATION_IN_PROGRESS"}, + ], + ["RUNNING"], + [ + {"host": "host2", "BrokerState": "RUNNING"}, + {"host": "host5", "BrokerState": "RUNNING"}, + ], + ), + ], +) +def test_find_hosts_matching_statuses(hosts, statuses, expected): + assert expected == _find_hosts_matching_statuses(hosts, statuses) + + +@pytest.mark.parametrize("hosts", ["", "host1", "host2,host3", "host2,host3,host1"]) +@patch("ansible_collections.community.aws.plugins.inventory.aws_mq._get_mq_hostname") +def test_inventory_format_inventory(m_get_mq_hostname, inventory, hosts): + hosts_vars = { + "host1": {"var10": "value10"}, + "host2": {"var20": "value20", "var21": "value21"}, + "host3": {"var30": "value30", "var31": "value31", "var32": "value32"}, + } + + m_get_mq_hostname.side_effect = lambda h: h["name"] + + class _inventory_host(object): + def __init__(self, name, host_vars): + self.name = name + self.vars = host_vars + + inventory.inventory = MagicMock() + inventory.inventory.get_host.side_effect = lambda x: _inventory_host(name=x, host_vars=hosts_vars.get(x)) + + hosts = [{"name": x} for x in hosts.split(",") if x] + expected = { + "_meta": {"hostvars": {x["name"]: hosts_vars.get(x["name"]) for x in hosts}}, + "aws_mq": {"hosts": [x["name"] for x in hosts]}, + } + + assert expected == inventory._format_inventory(hosts) + if hosts == []: + m_get_mq_hostname.assert_not_called() + + 
+@pytest.mark.parametrize("length", range(0, 10, 2)) +def test_inventory_populate(inventory, length): + group = "aws_mq" + hosts = [f"host_{int(i)}" for i in range(length)] + + inventory._add_hosts = MagicMock() + inventory._populate(hosts=hosts) + + inventory.inventory.add_group.assert_called_with("aws_mq") + + if len(hosts) == 0: + inventory.inventory._add_hosts.assert_not_called() + inventory.inventory.add_child.assert_not_called() + else: + inventory._add_hosts.assert_called_with(hosts=hosts, group=group) + inventory.inventory.add_child.assert_called_with("all", group) + + +def test_inventory_populate_from_cache(inventory): + cache_data = { + "_meta": { + "hostvars": { + "broker_A": {"var10": "value10"}, + "broker_B": {"var2": "value2"}, + "broker_C": {"var3": ["value30", "value31", "value32"]}, + } + }, + "all": {"hosts": ["broker_A", "broker_D", "broker_B", "broker_C"]}, + "aws_broker_group_A": {"hosts": ["broker_A", "broker_D"]}, + "aws_broker_group_B": {"hosts": ["broker_B"]}, + "aws_broker_group_C": {"hosts": ["broker_C"]}, + } + + inventory._populate_from_cache(cache_data) + inventory.inventory.add_group.assert_has_calls( + [ + call("aws_broker_group_A"), + call("aws_broker_group_B"), + call("aws_broker_group_C"), + ], + any_order=True, + ) + inventory.inventory.add_child.assert_has_calls( + [ + call("all", "aws_broker_group_A"), + call("all", "aws_broker_group_B"), + call("all", "aws_broker_group_C"), + ], + any_order=True, + ) + + inventory._populate_host_vars.assert_has_calls( + [ + call(["broker_A"], {"var10": "value10"}, "aws_broker_group_A"), + call(["broker_D"], {}, "aws_broker_group_A"), + call(["broker_B"], {"var2": "value2"}, "aws_broker_group_B"), + call(["broker_C"], {"var3": ["value30", "value31", "value32"]}, "aws_broker_group_C"), + ], + any_order=True, + ) + + +@pytest.mark.parametrize("detail", [{}, {"Tags": {"tag1": "value1", "tag2": "value2", "Tag3": "Value2"}}]) +def test_get_broker_host_tags(detail): + expected_tags = [ + {"Key": "tag1", "Value": "value1"}, + {"Key": "tag2", "Value": "value2"}, + {"Key": "Tag3", "Value": "Value2"}, + ] + + tags = _get_broker_host_tags(detail) + + if not detail: + assert tags == [] + else: + assert tags == expected_tags + + +@pytest.mark.parametrize("strict", [True, False]) +def test_add_details_to_hosts_with_no_hosts(connection, strict): + hosts = [] + + _add_details_to_hosts(connection, hosts, strict) + connection.describe_broker.assert_not_called() + + +def test_add_details_to_hosts_with_failure_not_strict(connection): + hosts = [{"BrokerId": "1"}] + + connection.describe_broker.side_effect = make_clienterror_exception() + + _add_details_to_hosts(connection, hosts, strict=False) + + assert hosts == [{"BrokerId": "1"}] + + +def test_add_details_to_hosts_with_failure_strict(connection): + hosts = [{"BrokerId": "1"}] + + connection.describe_broker.side_effect = make_clienterror_exception() + + with pytest.raises(AnsibleError): + _add_details_to_hosts(connection, hosts, strict=True) + + +def test_add_details_to_hosts_with_hosts(connection): + hosts = [{"BrokerId": "1"}, {"BrokerId": "2"}] + broker_hosts_tags = { + "1": {"Tags": {"tag10": "value10", "tag11": "value11"}}, + "2": {"Tags": {"tag20": "value20", "tag21": "value21", "tag22": "value22"}}, + } + connection.describe_broker.side_effect = lambda **kwargs: broker_hosts_tags.get(kwargs.get("BrokerId")) + + _add_details_to_hosts(connection, hosts, strict=False) + + assert hosts == [ + { + "BrokerId": "1", + "Tags": [ + {"Key": "tag10", "Value": "value10"}, + {"Key": "tag11", 
"Value": "value11"}, + ], + }, + { + "BrokerId": "2", + "Tags": [ + {"Key": "tag20", "Value": "value20"}, + {"Key": "tag21", "Value": "value21"}, + {"Key": "tag22", "Value": "value22"}, + ], + }, + ] + + +ADD_DETAILS_TO_HOSTS = "ansible_collections.community.aws.plugins.inventory.aws_mq._add_details_to_hosts" + + +@patch(ADD_DETAILS_TO_HOSTS) +def test_get_broker_hosts(m_add_details_to_hosts, inventory, connection): + broker = { + "BrokerArn": "arn:xxx:xxxx", + "BrokerId": "resource_id", + "BrokerName": "brk1", + "BrokerState": "RUNNING", + "EngineType": "RABBITMQ", + "DeploymentMode": "CLUSTER_MULTI_AZ", + } + + conn_paginator = MagicMock() + paginate = MagicMock() + + connection.get_paginator.return_value = conn_paginator + conn_paginator.paginate.return_value = paginate + + paginate.build_full_result.side_effect = lambda **kwargs: {"BrokerSummaries": [broker]} + + connection.describe_broker.return_value = {} + connection.list_brokers.return_value = {"BrokerSummaries": [broker]} + + strict = False + + result = inventory._get_broker_hosts(connection=connection, strict=strict)(paginate.build_full_result) + + assert result == [broker] + + m_add_details_to_hosts.assert_called_with(connection, result, strict) + + +@pytest.mark.parametrize("strict", [True, False]) +@patch(ADD_DETAILS_TO_HOSTS) +def test_get_broker_hosts_with_access_denied(m_add_details_to_hosts, inventory, connection, strict): + conn_paginator = MagicMock() + paginate = MagicMock() + + connection.get_paginator.return_value = conn_paginator + conn_paginator.paginate.return_value = paginate + + paginate.build_full_result.side_effect = make_clienterror_exception() + + if strict: + with pytest.raises(AnsibleError): + inventory._get_broker_hosts(connection=connection, strict=strict)(paginate.build_full_result) + else: + assert inventory._get_broker_hosts(connection=connection, strict=strict)(paginate.build_full_result) == [] + + m_add_details_to_hosts.assert_not_called() + + +@patch(ADD_DETAILS_TO_HOSTS) +def test_get_broker_hosts_with_client_error(m_add_details_to_hosts, inventory, connection): + conn_paginator = MagicMock() + paginate = MagicMock() + + connection.get_paginator.return_value = conn_paginator + conn_paginator.paginate.return_value = paginate + + paginate.build_full_result.side_effect = make_clienterror_exception(code="Unknown") + + with pytest.raises(AnsibleError): + inventory._get_broker_hosts(connection=connection, strict=False)(paginate.build_full_result) + + m_add_details_to_hosts.assert_not_called() + + +FIND_HOSTS_MATCHING_STATUSES = ( + "ansible_collections.community.aws.plugins.inventory.aws_mq._find_hosts_matching_statuses" +) + + +@pytest.mark.parametrize("regions", range(1, 5)) +@patch(FIND_HOSTS_MATCHING_STATUSES) +def test_inventory_get_all_hosts(m_find_hosts, inventory, regions): + params = { + "regions": [f"us-east-{int(i)}" for i in range(regions)], + "strict": random.choice((True, False)), + "statuses": [ + random.choice( + [ + "RUNNING", + "CREATION_IN_PROGRESS", + "REBOOT_IN_PROGRESS", + "DELETION_IN_PROGRESS", + "CRITICAL_ACTION_REQUIRED", + ] + ) + for i in range(3) + ], + } + + connections = [MagicMock() for i in range(regions)] + + inventory.all_clients.return_value = [(connections[i], f"us-east-{int(i)}") for i in range(regions)] + + ids = list(reversed(range(regions))) + broker_hosts = [{"BrokerName": f"broker_00{int(i)}"} for i in ids] + + inventory._get_broker_hosts = MagicMock() + inventory._get_broker_hosts._boto3_paginate_wrapper = MagicMock() + 
inventory._get_broker_hosts._boto3_paginate_wrapper.side_effect = [[i] for i in broker_hosts] + inventory._get_broker_hosts.return_value = inventory._get_broker_hosts._boto3_paginate_wrapper + + result = list(sorted(broker_hosts, key=lambda x: x["BrokerName"])) + + m_find_hosts.return_value = result + + assert result == inventory._get_all_hosts(**params) + inventory.all_clients.assert_called_with("mq") + inventory._get_broker_hosts.assert_has_calls( + [call(connections[i], params["strict"]) for i in range(regions)], any_order=True + ) + + m_find_hosts.assert_called_with(result, params["statuses"]) + + +@pytest.mark.parametrize("hostvars_prefix", [True]) +@pytest.mark.parametrize("hostvars_suffix", [True]) +@patch("ansible_collections.community.aws.plugins.inventory.aws_mq._get_mq_hostname") +def test_inventory_add_hosts(m_get_mq_hostname, inventory, hostvars_prefix, hostvars_suffix): + _options = { + "strict": random.choice((False, True)), + "compose": random.choice((False, True)), + "keyed_groups": "keyed_group_test_inventory_add_hosts", + "groups": ["all", "test_inventory_add_hosts"], + } + + if hostvars_prefix: + _options["hostvars_prefix"] = f"prefix_{generate_random_string(length=8, with_punctuation=False)}" + if hostvars_suffix: + _options["hostvars_suffix"] = f"suffix_{generate_random_string(length=8, with_punctuation=False)}" + + def _get_option_side_effect(x): + return _options.get(x) + + inventory.get_option.side_effect = _get_option_side_effect + + m_get_mq_hostname.side_effect = lambda h: h["BrokerName"] + + hosts = [ + { + "BrokerName": "broker_i_001", + "Tags": [{"Key": "Name", "Value": "broker_001"}, {"Key": "RunningEngine", "Value": "ActiveMQ"}], + "availability_zone": "us-east-1a", + }, + { + "BrokerName": "broker_i_002", + "Tags": [{"Key": "ClusterName", "Value": "test_cluster"}, {"Key": "RunningOS", "Value": "CoreOS"}], + }, + { + "BrokerName": "test_cluster", + "Tags": [{"Key": "CluserVersionOrigin", "Value": "2.0"}, {"Key": "Provider", "Value": "RedHat"}], + }, + { + "BrokerName": "another_cluster", + "Tags": [{"Key": "TestingPurpose", "Value": "Ansible"}], + "availability_zones": ["us-west-1a", "us-east-1b"], + }, + ] + + group = f"test_add_hosts_group_{generate_random_string(length=10, with_punctuation=False)}" + inventory._add_hosts(hosts, group) + + m_get_mq_hostname.assert_has_calls([call(h) for h in hosts], any_order=True) + + hosts_names = ["broker_i_001", "broker_i_002", "test_cluster", "another_cluster"] + inventory.inventory.add_host.assert_has_calls([call(name, group=group) for name in hosts_names], any_order=True) + + camel_hosts = [ + { + "broker_name": "broker_i_001", + "tags": {"Name": "broker_001", "RunningEngine": "ActiveMQ"}, + "availability_zone": "us-east-1a", + }, + {"broker_name": "broker_i_002", "tags": {"ClusterName": "test_cluster", "RunningOS": "CoreOS"}}, + {"broker_name": "test_cluster", "tags": {"CluserVersionOrigin": "2.0", "Provider": "RedHat"}}, + { + "broker_name": "another_cluster", + "tags": {"TestingPurpose": "Ansible"}, + "availability_zones": ["us-west-1a", "us-east-1b"], + }, + ] + + set_variable_calls = [] + for i in range(len(camel_hosts)): + for var, value in camel_hosts[i].items(): + if hostvars_prefix: + var = _options["hostvars_prefix"] + var + if hostvars_suffix: + var += _options["hostvars_suffix"] + set_variable_calls.append(call(hosts_names[i], var, value)) + + inventory.get_option.assert_has_calls([call("hostvars_prefix"), call("hostvars_suffix")]) + inventory.inventory.set_variable.assert_has_calls(set_variable_calls) 
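+    # (editor's note) when hostvars_prefix/hostvars_suffix are set, the vars
+    # handed to the compose/keyed-group helpers are expected to carry both the
+    # bare and the decorated keys, so camel_hosts is rebuilt below accordingly.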
+ + if hostvars_prefix or hostvars_suffix: + tmp = [] + for host in camel_hosts: + new_host = copy.deepcopy(host) + for key in host: + new_key = key + if hostvars_prefix: + new_key = _options["hostvars_prefix"] + new_key + if hostvars_suffix: + new_key += _options["hostvars_suffix"] + new_host[new_key] = host[key] + tmp.append(new_host) + camel_hosts = tmp + + inventory._set_composite_vars.assert_has_calls( + [ + call(_options["compose"], camel_hosts[i], hosts_names[i], strict=_options["strict"]) + for i in range(len(camel_hosts)) + ], + any_order=True, + ) + inventory._add_host_to_composed_groups.assert_has_calls( + [ + call(_options["groups"], camel_hosts[i], hosts_names[i], strict=_options["strict"]) + for i in range(len(camel_hosts)) + ], + any_order=True, + ) + inventory._add_host_to_keyed_groups.assert_has_calls( + [ + call(_options["keyed_groups"], camel_hosts[i], hosts_names[i], strict=_options["strict"]) + for i in range(len(camel_hosts)) + ], + any_order=True, + ) + + +BASE_INVENTORY_PARSE = "ansible_collections.community.aws.plugins.inventory.aws_mq.AWSInventoryBase.parse" + + +@pytest.mark.parametrize("user_cache_directive", [True, False]) +@pytest.mark.parametrize("cache", [True, False]) +@pytest.mark.parametrize("cache_hit", [True, False]) +@patch(BASE_INVENTORY_PARSE) +def test_inventory_parse(m_parse, inventory, user_cache_directive, cache, cache_hit): + inventory_data = MagicMock() + loader = MagicMock() + path = generate_random_string(with_punctuation=False, with_digits=False) + + options = {} + options["regions"] = [f"us-east-{d}" for d in range(random.randint(1, 5))] + options["strict_permissions"] = random.choice((True, False)) + options["statuses"] = generate_random_string(with_punctuation=False) + + options["cache"] = user_cache_directive + + def get_option_side_effect(v): + return options.get(v) + + inventory.get_option.side_effect = get_option_side_effect + + cache_key = path + generate_random_string() + inventory.get_cache_key.return_value = cache_key + + cache_key_value = generate_random_string() + if cache_hit: + inventory._cache[cache_key] = cache_key_value + + inventory._populate = MagicMock() + inventory._populate_from_cache = MagicMock() + inventory._get_all_hosts = MagicMock() + all_hosts = [ + {"host": f"host_{int(random.randint(1, 1000))}"}, + {"host": f"host_{int(random.randint(1, 1000))}"}, + {"host": f"host_{int(random.randint(1, 1000))}"}, + {"host": f"host_{int(random.randint(1, 1000))}"}, + ] + inventory._get_all_hosts.return_value = all_hosts + + format_cache_key_value = f"format_inventory_{all_hosts}" + inventory._format_inventory = MagicMock() + inventory._format_inventory.return_value = format_cache_key_value + + inventory.parse(inventory_data, loader, path, cache) + + m_parse.assert_called_with(inventory_data, loader, path, cache=cache) + + if not cache or not user_cache_directive or (cache and user_cache_directive and not cache_hit): + inventory._get_all_hosts.assert_called_with( + options["regions"], + options["strict_permissions"], + options["statuses"], + ) + inventory._populate.assert_called_with(all_hosts) + inventory._format_inventory.assert_called_with(all_hosts) + else: + inventory._get_all_hosts.assert_not_called() + + if cache and user_cache_directive and cache_hit: + inventory._populate_from_cache.assert_called_with(cache_key_value) + + if cache and user_cache_directive and not cache_hit or (not cache and user_cache_directive): + # validate that cache was populated + assert inventory._cache[cache_key] == format_cache_key_value diff 
--git a/ansible_collections/community/aws/tests/unit/plugins/modules/conftest.py b/ansible_collections/community/aws/tests/unit/plugins/modules/conftest.py index a7d1e0475..ba4a1adc3 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/conftest.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/conftest.py @@ -1,16 +1,14 @@ # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type import json import pytest -from ansible.module_utils.six import string_types from ansible.module_utils._text import to_bytes from ansible.module_utils.common._collections_compat import MutableMapping +from ansible.module_utils.six import string_types @pytest.fixture @@ -18,14 +16,14 @@ def patch_ansible_module(request, mocker): if isinstance(request.param, string_types): args = request.param elif isinstance(request.param, MutableMapping): - if 'ANSIBLE_MODULE_ARGS' not in request.param: - request.param = {'ANSIBLE_MODULE_ARGS': request.param} - if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False + if "ANSIBLE_MODULE_ARGS" not in request.param: + request.param = {"ANSIBLE_MODULE_ARGS": request.param} + if "_ansible_remote_tmp" not in request.param["ANSIBLE_MODULE_ARGS"]: + request.param["ANSIBLE_MODULE_ARGS"]["_ansible_remote_tmp"] = "/tmp" + if "_ansible_keep_remote_files" not in request.param["ANSIBLE_MODULE_ARGS"]: + request.param["ANSIBLE_MODULE_ARGS"]["_ansible_keep_remote_files"] = False args = json.dumps(request.param) else: - raise Exception('Malformed data to the patch_ansible_module pytest fixture') + raise Exception("Malformed data to the patch_ansible_module pytest fixture") - mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) + mocker.patch("ansible.module_utils.basic._ANSIBLE_ARGS", to_bytes(args)) diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_acm_certificate.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_acm_certificate.py index 726601fe8..608246217 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_acm_certificate.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_acm_certificate.py @@ -15,18 +15,21 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
-from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type from pprint import pprint +from ansible.module_utils._text import to_text + from ansible_collections.community.aws.plugins.modules.acm_certificate import chain_compare from ansible_collections.community.aws.plugins.modules.acm_certificate import pem_chain_split -from ansible.module_utils._text import to_text def test_chain_compare(): - # The functions we're testing take module as an argument # Just so they can call module.fail_json # Let's just use None for the unit tests, @@ -34,14 +37,14 @@ def test_chain_compare(): # And if they do, fail_json is not applicable module = None - fixture_suffix = 'tests/unit/plugins/modules/fixtures/certs' + fixture_suffix = "tests/unit/plugins/modules/fixtures/certs" # Test chain split function on super simple (invalid) certs - expected = ['aaa', 'bbb', 'ccc'] + expected = ["aaa", "bbb", "ccc"] - for fname in ['simple-chain-a.cert', 'simple-chain-b.cert']: - path = fixture_suffix + '/' + fname - with open(path, 'r') as f: + for fname in ["simple-chain-a.cert", "simple-chain-b.cert"]: + path = fixture_suffix + "/" + fname + with open(path, "r") as f: pem = to_text(f.read()) actual = pem_chain_split(module, pem) actual = [a.strip() for a in actual] @@ -50,76 +53,60 @@ def test_chain_compare(): pprint(expected) print("Actual:") pprint(actual) - raise AssertionError("Failed to properly split %s" % fname) + raise AssertionError(f"Failed to properly split {fname}") # Now test real chains # chains with same same_as should be considered equal test_chains = [ - { # Original Cert chain - 'path': fixture_suffix + '/chain-1.0.cert', - 'same_as': 1, - 'length': 3 - }, - { # Same as 1.0, but longer PEM lines - 'path': fixture_suffix + '/chain-1.1.cert', - 'same_as': 1, - 'length': 3 - }, + {"path": fixture_suffix + "/chain-1.0.cert", "same_as": 1, "length": 3}, # Original Cert chain + {"path": fixture_suffix + "/chain-1.1.cert", "same_as": 1, "length": 3}, # Same as 1.0, but longer PEM lines { # Same as 1.0, but without the stuff before each -------- - 'path': fixture_suffix + '/chain-1.2.cert', - 'same_as': 1, - 'length': 3 + "path": fixture_suffix + "/chain-1.2.cert", + "same_as": 1, + "length": 3, }, { # Same as 1.0, but in a different order, so should be considered different - 'path': fixture_suffix + '/chain-1.3.cert', - 'same_as': 2, - 'length': 3 + "path": fixture_suffix + "/chain-1.3.cert", + "same_as": 2, + "length": 3, }, { # Same as 1.0, but with last link missing - 'path': fixture_suffix + '/chain-1.4.cert', - 'same_as': 3, - 'length': 2 + "path": fixture_suffix + "/chain-1.4.cert", + "same_as": 3, + "length": 2, }, { # Completely different cert chain to all the others - 'path': fixture_suffix + '/chain-4.cert', - 'same_as': 4, - 'length': 3 - }, - { # Single cert - 'path': fixture_suffix + '/a.pem', - 'same_as': 5, - 'length': 1 + "path": fixture_suffix + "/chain-4.cert", + "same_as": 4, + "length": 3, }, - { # a different, single cert - 'path': fixture_suffix + '/b.pem', - 'same_as': 6, - 'length': 1 - } + {"path": fixture_suffix + "/a.pem", "same_as": 5, "length": 1}, # Single cert + {"path": fixture_suffix + "/b.pem", "same_as": 6, "length": 1}, # a different, single cert ] for chain in test_chains: - with open(chain['path'], 'r') as f: - chain['pem_text'] = to_text(f.read()) + with open(chain["path"], "r") as f: + chain["pem_text"] = to_text(f.read()) # 
Test to make sure our regex isn't too greedy - chain['split'] = pem_chain_split(module, chain['pem_text']) - if len(chain['split']) != chain['length']: + chain["split"] = pem_chain_split(module, chain["pem_text"]) + if len(chain["split"]) != chain["length"]: print("Cert before split") - print(chain['pem_text']) + print(chain["pem_text"]) print("Cert after split") - pprint(chain['split']) - print("path: %s" % chain['path']) - print("Expected chain length: %d" % chain['length']) - print("Actual chain length: %d" % len(chain['split'])) - raise AssertionError("Chain %s was not split properly" % chain['path']) + pprint(chain["split"]) + print(f"path: {chain['path']}") + print(f"Expected chain length: {int(chain['length'])}") + print(f"Actual chain length: {len(chain['split'])}") + raise AssertionError(f"Chain {chain['path']} was not split properly") for chain_a in test_chains: for chain_b in test_chains: - expected = (chain_a['same_as'] == chain_b['same_as']) + expected = chain_a["same_as"] == chain_b["same_as"] # Now test the comparison function - actual = chain_compare(module, chain_a['pem_text'], chain_b['pem_text']) + actual = chain_compare(module, chain_a["pem_text"], chain_b["pem_text"]) if expected != actual: - print("Error, unexpected comparison result between \n%s\nand\n%s" % (chain_a['path'], chain_b['path'])) - print("Expected %s got %s" % (str(expected), str(actual))) + print(f"Error, unexpected comparison result between \n{chain_a['path']}\nand\n{chain_b['path']}") + print(f"Expected {str(expected)} got {str(actual)}") assert expected == actual diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_api_gateway.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_api_gateway.py index a6f2c3e91..f0d9de8fa 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_api_gateway.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_api_gateway.py @@ -5,17 +5,21 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type import sys + import pytest from ansible_collections.amazon.aws.plugins.module_utils import modules as aws_modules from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 -from ansible_collections.community.aws.tests.unit.plugins.modules.utils import set_module_args import ansible_collections.community.aws.plugins.modules.api_gateway as agw +from ansible_collections.community.aws.tests.unit.plugins.modules.utils import set_module_args if not HAS_BOTO3: pytestmark = pytest.mark.skip("test_api_gateway.py requires the `boto3` and `botocore` modules") @@ -25,7 +29,7 @@ exit_return_dict = {} def fake_exit_json(self, **kwargs): - """ store the kwargs given to exit_json rather than putting them out to stdout""" + """store the kwargs given to exit_json rather than putting them out to stdout""" global exit_return_dict exit_return_dict = kwargs sys.exit(0) @@ -33,7 +37,6 @@ def fake_exit_json(self, **kwargs): def test_upload_api(monkeypatch): class FakeConnection: - def put_rest_api(self, *args, **kwargs): assert kwargs["body"] == "the-swagger-text-is-fake" return {"msg": "success!"} @@ -46,25 +49,29 @@ def test_upload_api(monkeypatch): monkeypatch.setattr(aws_modules, "boto3_conn", return_fake_connection) 
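    # (editor's note) the next patch routes agw.main()'s terminal exit_json call
    # into fake_exit_json, which stashes its kwargs in exit_return_dict and
    # raises SystemExit for the pytest.raises block below to absorb.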
monkeypatch.setattr(aws_modules.AnsibleAWSModule, "exit_json", fake_exit_json) - set_module_args({ - "api_id": "fred", - "state": "present", - "swagger_text": "the-swagger-text-is-fake", - "region": 'mars-north-1', - "_ansible_tmpdir": "/tmp/ansibl-abcdef", - }) + set_module_args( + { + "api_id": "fred", + "state": "present", + "swagger_text": "the-swagger-text-is-fake", + "region": "mars-north-1", + "_ansible_tmpdir": "/tmp/ansibl-abcdef", + } + ) with pytest.raises(SystemExit): agw.main() assert exit_return_dict["changed"] def test_warn_if_region_not_specified(): - - set_module_args({ - "name": "api_gateway", - "state": "present", - "runtime": 'python2.7', - "role": 'arn:aws:iam::123456789012:role/lambda_basic_execution', - "handler": 'lambda_python.my_handler'}) + set_module_args( + { + "name": "api_gateway", + "state": "present", + "runtime": "python2.7", + "role": "arn:aws:iam::123456789012:role/lambda_basic_execution", + "handler": "lambda_python.my_handler", + } + ) with pytest.raises(SystemExit): print(agw.main()) diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_data_pipeline.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_data_pipeline.py index 1a188e8ed..a2bd06ad8 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_data_pipeline.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_data_pipeline.py @@ -4,12 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type import collections -import os import json +import os + import pytest from ansible.module_utils._text import to_text @@ -19,11 +23,18 @@ try: except ImportError: pass +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 + # Magic... 
Incorrectly identified by pylint as unused -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import +# isort: off +# pylint: disable=unused-import + +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify + +# pylint: enable=unused-import +# isort: on -from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 from ansible_collections.community.aws.plugins.modules import data_pipeline if not HAS_BOTO3: @@ -34,7 +45,7 @@ class FailException(Exception): pass -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def dp_setup(): """ Yield a FakeModule object, data pipeline id of a vanilla data pipeline, and data pipeline objects @@ -44,41 +55,41 @@ def dp_setup(): Dependencies = collections.namedtuple("Dependencies", ["module", "data_pipeline_id", "objects"]) # get objects to use to test populating and activating the data pipeline - if not os.getenv('PLACEBO_RECORD'): - objects = [{"name": "Every 1 day", - "id": "DefaultSchedule", - "fields": []}, - {"name": "Default", - "id": "Default", - "fields": []}] + if not os.getenv("PLACEBO_RECORD"): + objects = [ + {"name": "Every 1 day", "id": "DefaultSchedule", "fields": []}, + {"name": "Default", "id": "Default", "fields": []}, + ] else: - s3 = boto3.client('s3') + s3 = boto3.client("s3") data = s3.get_object(Bucket="ansible-test-datapipeline", Key="pipeline-object/new.json") - objects = json.loads(to_text(data['Body'].read())) + objects = json.loads(to_text(data["Body"].read())) # create a module with vanilla data pipeline parameters - params = {'name': 'ansible-test-create-pipeline', - 'description': 'ansible-datapipeline-unit-test', - 'state': 'present', - 'timeout': 300, - 'objects': [], - 'tags': {}, - 'parameters': [], - 'values': []} + params = { + "name": "ansible-test-create-pipeline", + "description": "ansible-datapipeline-unit-test", + "state": "present", + "timeout": 300, + "objects": [], + "tags": {}, + "parameters": [], + "values": [], + } module = FakeModule(**params) # yield a module, the data pipeline id, and the data pipeline objects (that are not yet defining the vanilla data pipeline) - if not os.getenv('PLACEBO_RECORD'): - yield Dependencies(module=module, data_pipeline_id='df-0590406117G8DPQZY2HA', objects=objects) + if not os.getenv("PLACEBO_RECORD"): + yield Dependencies(module=module, data_pipeline_id="df-0590406117G8DPQZY2HA", objects=objects) else: - connection = boto3.client('datapipeline') + connection = boto3.client("datapipeline") _changed, result = data_pipeline.create_pipeline(connection, module) - data_pipeline_id = result['data_pipeline']['pipeline_id'] + data_pipeline_id = result["data_pipeline"]["pipeline_id"] yield Dependencies(module=module, data_pipeline_id=data_pipeline_id, objects=objects) # remove data pipeline - if os.getenv('PLACEBO_RECORD'): - module.params.update(state='absent') + if os.getenv("PLACEBO_RECORD"): + module.params.update(state="absent") data_pipeline.delete_pipeline(connection, module) @@ -89,7 +100,7 @@ class FakeModule(object): def fail_json(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise FailException('FAIL') + raise FailException("FAIL") def exit_json(self, *args, **kwargs): 
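        # (editor's note) stand-in for AnsibleModule.exit_json: only records the
        # call so tests can inspect exit_args/exit_kwargs afterwards.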
self.exit_args = args @@ -97,91 +108,101 @@ class FakeModule(object): def test_create_pipeline_already_exists(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") changed, result = data_pipeline.create_pipeline(connection, dp_setup.module) assert changed is False - assert "Data Pipeline ansible-test-create-pipeline is present" in result['msg'] + assert "Data Pipeline ansible-test-create-pipeline is present" in result["msg"] def test_pipeline_field(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") pipeline_field_info = data_pipeline.pipeline_field(connection, dp_setup.data_pipeline_id, "@pipelineState") assert pipeline_field_info == "PENDING" def test_define_pipeline(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') - changed, result = data_pipeline.define_pipeline(connection, dp_setup.module, dp_setup.objects, dp_setup.data_pipeline_id) + connection = placeboify.client("datapipeline") + changed, result = data_pipeline.define_pipeline( + connection, dp_setup.module, dp_setup.objects, dp_setup.data_pipeline_id + ) assert changed is True - assert 'has been updated' in result + assert "has been updated" in result def test_deactivate_pipeline(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") _changed, result = data_pipeline.deactivate_pipeline(connection, dp_setup.module) # XXX possible bug # assert changed is True - assert "Data Pipeline ansible-test-create-pipeline deactivated" in result['msg'] + assert "Data Pipeline ansible-test-create-pipeline deactivated" in result["msg"] def test_activate_without_population(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") with pytest.raises(FailException): _changed, _result = data_pipeline.activate_pipeline(connection, dp_setup.module) - assert dp_setup.module.exit_kwargs.get('msg') == "You need to populate your pipeline before activation." + assert dp_setup.module.exit_kwargs.get("msg") == "You need to populate your pipeline before activation." def test_create_pipeline(placeboify, maybe_sleep): - connection = placeboify.client('datapipeline') - params = {'name': 'ansible-unittest-create-pipeline', - 'description': 'ansible-datapipeline-unit-test', - 'state': 'present', - 'timeout': 300, - 'tags': {}} + connection = placeboify.client("datapipeline") + params = { + "name": "ansible-unittest-create-pipeline", + "description": "ansible-datapipeline-unit-test", + "state": "present", + "timeout": 300, + "tags": {}, + } m = FakeModule(**params) changed, result = data_pipeline.create_pipeline(connection, m) assert changed is True - assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline created." + assert result["msg"] == "Data Pipeline ansible-unittest-create-pipeline created." 
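    # (editor's note) clean-up: remove the pipeline created above so replayed
    # placebo recordings are not polluted by it.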
data_pipeline.delete_pipeline(connection, m) def test_create_pipeline_with_tags(placeboify, maybe_sleep): - connection = placeboify.client('datapipeline') - params = {'name': 'ansible-unittest-create-pipeline_tags', - 'description': 'ansible-datapipeline-unit-test', - 'state': 'present', - 'tags': {'ansible': 'test'}, - 'timeout': 300} + connection = placeboify.client("datapipeline") + params = { + "name": "ansible-unittest-create-pipeline_tags", + "description": "ansible-datapipeline-unit-test", + "state": "present", + "tags": {"ansible": "test"}, + "timeout": 300, + } m = FakeModule(**params) changed, result = data_pipeline.create_pipeline(connection, m) assert changed is True - assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline_tags created." + assert result["msg"] == "Data Pipeline ansible-unittest-create-pipeline_tags created." data_pipeline.delete_pipeline(connection, m) def test_delete_nonexistent_pipeline(placeboify, maybe_sleep): - connection = placeboify.client('datapipeline') - params = {'name': 'ansible-test-nonexistent', - 'description': 'ansible-test-nonexistent', - 'state': 'absent', - 'objects': [], - 'tags': {'ansible': 'test'}, - 'timeout': 300} + connection = placeboify.client("datapipeline") + params = { + "name": "ansible-test-nonexistent", + "description": "ansible-test-nonexistent", + "state": "absent", + "objects": [], + "tags": {"ansible": "test"}, + "timeout": 300, + } m = FakeModule(**params) changed, _result = data_pipeline.delete_pipeline(connection, m) assert changed is False def test_delete_pipeline(placeboify, maybe_sleep): - connection = placeboify.client('datapipeline') - params = {'name': 'ansible-test-nonexistent', - 'description': 'ansible-test-nonexistent', - 'state': 'absent', - 'objects': [], - 'tags': {'ansible': 'test'}, - 'timeout': 300} + connection = placeboify.client("datapipeline") + params = { + "name": "ansible-test-nonexistent", + "description": "ansible-test-nonexistent", + "state": "absent", + "objects": [], + "tags": {"ansible": "test"}, + "timeout": 300, + } m = FakeModule(**params) data_pipeline.create_pipeline(connection, m) changed, _result = data_pipeline.delete_pipeline(connection, m) @@ -189,29 +210,29 @@ def test_delete_pipeline(placeboify, maybe_sleep): def test_build_unique_id_different(): - m = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id'}) - m2 = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id-different'}) + m = FakeModule(**{"name": "ansible-unittest-1", "description": "test-unique-id"}) + m2 = FakeModule(**{"name": "ansible-unittest-1", "description": "test-unique-id-different"}) assert data_pipeline.build_unique_id(m) != data_pipeline.build_unique_id(m2) def test_build_unique_id_same(): - m = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id', 'tags': {'ansible': 'test'}}) - m2 = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id', 'tags': {'ansible': 'test'}}) + m = FakeModule(**{"name": "ansible-unittest-1", "description": "test-unique-id", "tags": {"ansible": "test"}}) + m2 = FakeModule(**{"name": "ansible-unittest-1", "description": "test-unique-id", "tags": {"ansible": "test"}}) assert data_pipeline.build_unique_id(m) == data_pipeline.build_unique_id(m2) def test_build_unique_id_obj(): # check that the object can be different and the unique id should be the same; should be able to modify objects - m = FakeModule(**{'name': 'ansible-unittest-1', 'objects': [{'first': 'object'}]}) - m2 = 
FakeModule(**{'name': 'ansible-unittest-1', 'objects': [{'second': 'object'}]}) + m = FakeModule(**{"name": "ansible-unittest-1", "objects": [{"first": "object"}]}) + m2 = FakeModule(**{"name": "ansible-unittest-1", "objects": [{"second": "object"}]}) assert data_pipeline.build_unique_id(m) == data_pipeline.build_unique_id(m2) def test_format_tags(): - unformatted_tags = {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'} + unformatted_tags = {"key1": "val1", "key2": "val2", "key3": "val3"} formatted_tags = data_pipeline.format_tags(unformatted_tags) for tag_set in formatted_tags: - assert unformatted_tags[tag_set['key']] == tag_set['value'] + assert unformatted_tags[tag_set["key"]] == tag_set["value"] def test_format_empty_tags(): @@ -221,45 +242,44 @@ def test_format_empty_tags(): def test_pipeline_description(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") dp_id = dp_setup.data_pipeline_id pipelines = data_pipeline.pipeline_description(connection, dp_id) - assert dp_id == pipelines['pipelineDescriptionList'][0]['pipelineId'] + assert dp_id == pipelines["pipelineDescriptionList"][0]["pipelineId"] def test_pipeline_description_nonexistent(placeboify, maybe_sleep): hypothetical_pipeline_id = "df-015440025PF7YGLDK47C" - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") with pytest.raises(data_pipeline.DataPipelineNotFound): data_pipeline.pipeline_description(connection, hypothetical_pipeline_id) def test_check_dp_exists_true(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") exists = data_pipeline.check_dp_exists(connection, dp_setup.data_pipeline_id) assert exists is True def test_check_dp_exists_false(placeboify, maybe_sleep): hypothetical_pipeline_id = "df-015440025PF7YGLDK47C" - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") exists = data_pipeline.check_dp_exists(connection, hypothetical_pipeline_id) assert exists is False def test_check_dp_status(placeboify, maybe_sleep, dp_setup): - inactive_states = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING'] - connection = placeboify.client('datapipeline') + inactive_states = ["INACTIVE", "PENDING", "FINISHED", "DELETING"] + connection = placeboify.client("datapipeline") state = data_pipeline.check_dp_status(connection, dp_setup.data_pipeline_id, inactive_states) assert state is True def test_activate_pipeline(placeboify, maybe_sleep, dp_setup): # use objects to define pipeline before activating - connection = placeboify.client('datapipeline') - data_pipeline.define_pipeline(connection, - module=dp_setup.module, - objects=dp_setup.objects, - dp_id=dp_setup.data_pipeline_id) + connection = placeboify.client("datapipeline") + data_pipeline.define_pipeline( + connection, module=dp_setup.module, objects=dp_setup.objects, dp_id=dp_setup.data_pipeline_id + ) changed, _result = data_pipeline.activate_pipeline(connection, dp_setup.module) assert changed is True diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_confirm_connection.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_confirm_connection.py index 63804415d..f65648dad 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_confirm_connection.py +++ 
b/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_confirm_connection.py @@ -1,28 +1,30 @@ -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import call +from unittest.mock import patch import pytest + try: from botocore.exceptions import ClientError except ImportError: pass from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 -from ansible_collections.community.aws.tests.unit.compat.mock import call -from ansible_collections.community.aws.tests.unit.compat.mock import patch + +from ansible_collections.community.aws.plugins.modules import directconnect_confirm_connection from ansible_collections.community.aws.tests.unit.plugins.modules.utils import AnsibleExitJson from ansible_collections.community.aws.tests.unit.plugins.modules.utils import AnsibleFailJson from ansible_collections.community.aws.tests.unit.plugins.modules.utils import ModuleTestCase from ansible_collections.community.aws.tests.unit.plugins.modules.utils import set_module_args -from ansible_collections.community.aws.plugins.modules import directconnect_confirm_connection - if not HAS_BOTO3: - pytestmark = pytest.mark.skip("test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules") + pytestmark = pytest.mark.skip( + "test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules" + ) -@patch('ansible_collections.amazon.aws.plugins.module_utils.core.HAS_BOTO3', new=True) +@patch("ansible_collections.amazon.aws.plugins.module_utils.core.HAS_BOTO3", new=True) @patch.object(directconnect_confirm_connection.AnsibleAWSModule, "client") class TestAWSDirectConnectConfirmConnection(ModuleTestCase): def test_missing_required_parameters(self, *args): @@ -45,22 +47,18 @@ class TestAWSDirectConnectConfirmConnection(ModuleTestCase): "connectionName": "ansible-test-connection", "bandwidth": "1Gbps", "ownerAccount": "123456789012", - "region": "us-west-2" + "region": "us-west-2", } ] } - set_module_args({ - "connection_id": "dxcon-fgq9rgot" - }) + set_module_args({"connection_id": "dxcon-fgq9rgot"}) with self.assertRaises(AnsibleExitJson) as exec_info: directconnect_confirm_connection.main() result = exec_info.exception.args[0] assert result["changed"] is False assert result["connection_state"] == "requested" - mock_client.return_value.describe_connections.assert_has_calls([ - call(connectionId="dxcon-fgq9rgot") - ]) + mock_client.return_value.describe_connections.assert_has_calls([call(connectionId="dxcon-fgq9rgot")]) mock_client.return_value.confirm_connection.assert_not_called() def test_get_by_name(self, mock_client): @@ -73,39 +71,31 @@ class TestAWSDirectConnectConfirmConnection(ModuleTestCase): "connectionName": "ansible-test-connection", "bandwidth": "1Gbps", "ownerAccount": "123456789012", - "region": "us-west-2" + "region": "us-west-2", } ] } - set_module_args({ - "name": "ansible-test-connection" - }) + set_module_args({"name": "ansible-test-connection"}) with self.assertRaises(AnsibleExitJson) as exec_info: directconnect_confirm_connection.main() result = exec_info.exception.args[0] assert result["changed"] is False assert result["connection_state"] == "requested" - mock_client.return_value.describe_connections.assert_has_calls([ - call(), - call(connectionId="dxcon-fgq9rgot") - ]) + 
mock_client.return_value.describe_connections.assert_has_calls([call(), call(connectionId="dxcon-fgq9rgot")]) mock_client.return_value.confirm_connection.assert_not_called() def test_missing_connection_id(self, mock_client): mock_client.return_value.describe_connections.side_effect = ClientError( - {'Error': {'Code': 'ResourceNotFoundException'}}, 'DescribeConnection') - set_module_args({ - "connection_id": "dxcon-aaaabbbb" - }) + {"Error": {"Code": "ResourceNotFoundException"}}, "DescribeConnection" + ) + set_module_args({"connection_id": "dxcon-aaaabbbb"}) with self.assertRaises(AnsibleFailJson) as exec_info: directconnect_confirm_connection.main() result = exec_info.exception.args[0] assert result["failed"] is True - mock_client.return_value.describe_connections.assert_has_calls([ - call(connectionId="dxcon-aaaabbbb") - ]) + mock_client.return_value.describe_connections.assert_has_calls([call(connectionId="dxcon-aaaabbbb")]) def test_missing_name(self, mock_client): mock_client.return_value.describe_connections.return_value = { @@ -117,21 +107,17 @@ class TestAWSDirectConnectConfirmConnection(ModuleTestCase): "connectionName": "ansible-test-connection", "bandwidth": "1Gbps", "ownerAccount": "123456789012", - "region": "us-west-2" + "region": "us-west-2", } ] } - set_module_args({ - "name": "foobar" - }) + set_module_args({"name": "foobar"}) with self.assertRaises(AnsibleFailJson) as exec_info: directconnect_confirm_connection.main() result = exec_info.exception.args[0] assert result["failed"] is True - mock_client.return_value.describe_connections.assert_has_calls([ - call() - ]) + mock_client.return_value.describe_connections.assert_has_calls([call()]) def test_confirm(self, mock_client): mock_client.return_value.describe_connections.return_value = { @@ -143,22 +129,22 @@ class TestAWSDirectConnectConfirmConnection(ModuleTestCase): "connectionName": "ansible-test-connection", "bandwidth": "1Gbps", "ownerAccount": "123456789012", - "region": "us-west-2" + "region": "us-west-2", } ] } mock_client.return_value.confirm_connection.return_value = [{}] - set_module_args({ - "connection_id": "dxcon-fgq9rgot" - }) + set_module_args({"connection_id": "dxcon-fgq9rgot"}) with self.assertRaises(AnsibleExitJson) as exec_info: directconnect_confirm_connection.main() result = exec_info.exception.args[0] assert result["changed"] is True - mock_client.return_value.describe_connections.assert_has_calls([ - call(connectionId="dxcon-fgq9rgot"), - call(connectionId="dxcon-fgq9rgot"), - call(connectionId="dxcon-fgq9rgot") - ]) + mock_client.return_value.describe_connections.assert_has_calls( + [ + call(connectionId="dxcon-fgq9rgot"), + call(connectionId="dxcon-fgq9rgot"), + call(connectionId="dxcon-fgq9rgot"), + ] + ) mock_client.return_value.confirm_connection.assert_called_once_with(connectionId="dxcon-fgq9rgot") diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_connection.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_connection.py index 65ba0a3f0..f9a620843 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_connection.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_connection.py @@ -4,81 +4,90 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ 
import division +from __future__ import print_function + __metaclass__ = type import pytest from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 + # Magic... Incorrectly identified by pylint as unused -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import +# isort: off +# pylint: disable=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify + +# pylint: enable=unused-import +# isort: on from ansible_collections.community.aws.plugins.modules import directconnect_connection if not HAS_BOTO3: - pytestmark = pytest.mark.skip("test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules") + pytestmark = pytest.mark.skip( + "test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules" + ) # When rerecording these tests, create a stand alone connection with default values in us-west-2 # with the name ansible-test-connection and set connection_id to the appropriate value connection_id = "dxcon-fgq9rgot" -connection_name = 'ansible-test-connection' +connection_name = "ansible-test-connection" def test_connection_status(placeboify, maybe_sleep): - client = placeboify.client('directconnect') - status = directconnect_connection.connection_status(client, connection_id)['connection'] - assert status['connectionName'] == connection_name - assert status['connectionId'] == connection_id + client = placeboify.client("directconnect") + status = directconnect_connection.connection_status(client, connection_id)["connection"] + assert status["connectionName"] == connection_name + assert status["connectionId"] == connection_id def test_connection_exists_by_id(placeboify, maybe_sleep): - client = placeboify.client('directconnect') + client = placeboify.client("directconnect") exists = directconnect_connection.connection_exists(client, connection_id) assert exists == connection_id def test_connection_exists_by_name(placeboify, maybe_sleep): - client = placeboify.client('directconnect') + client = placeboify.client("directconnect") exists = directconnect_connection.connection_exists(client, None, connection_name) assert exists == connection_id def test_connection_does_not_exist(placeboify, maybe_sleep): - client = placeboify.client('directconnect') - exists = directconnect_connection.connection_exists(client, 'dxcon-notthere') + client = placeboify.client("directconnect") + exists = directconnect_connection.connection_exists(client, "dxcon-notthere") assert exists is False def test_changed_properties(placeboify, maybe_sleep): - client = placeboify.client('directconnect') - status = directconnect_connection.connection_status(client, connection_id)['connection'] + client = placeboify.client("directconnect") + status = directconnect_connection.connection_status(client, connection_id)["connection"] location = "differentlocation" - bandwidth = status['bandwidth'] + bandwidth = status["bandwidth"] assert directconnect_connection.changed_properties(status, location, bandwidth) is True def test_associations_are_not_updated(placeboify, maybe_sleep): - client = placeboify.client('directconnect') - status = directconnect_connection.connection_status(client, connection_id)['connection'] - lag_id = 
status.get('lagId') + client = placeboify.client("directconnect") + status = directconnect_connection.connection_status(client, connection_id)["connection"] + lag_id = status.get("lagId") assert directconnect_connection.update_associations(client, status, connection_id, lag_id) is False def test_create_and_delete(placeboify, maybe_sleep): - client = placeboify.client('directconnect') + client = placeboify.client("directconnect") created_conn = verify_create_works(placeboify, maybe_sleep, client) verify_delete_works(placeboify, maybe_sleep, client, created_conn) def verify_create_works(placeboify, maybe_sleep, client): - created = directconnect_connection.create_connection(client=client, - location="EqSE2", - bandwidth="1Gbps", - name="ansible-test-2", - lag_id=None) - assert created.startswith('dxcon') + created = directconnect_connection.create_connection( + client=client, location="EqSE2", bandwidth="1Gbps", name="ansible-test-2", lag_id=None + ) + assert created.startswith("dxcon") return created diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py index 90c8d9604..134be7167 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py @@ -4,40 +4,52 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type -import pytest -import os import collections +import os -# Magic... Incorrectly identified by pylint as unused -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import +import pytest from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +# Magic... 
Incorrectly identified by pylint as unused +# isort: off +# pylint: disable=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify + +# pylint: enable=unused-import +# isort: on + from ansible_collections.community.aws.plugins.modules import directconnect_link_aggregation_group as lag_module if not HAS_BOTO3: - pytestmark = pytest.mark.skip("test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules") + pytestmark = pytest.mark.skip( + "test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules" + ) @pytest.fixture(scope="module") def dependencies(): - # each LAG dict will contain the keys: module, connections, virtual_interfaces Dependencies = collections.namedtuple("Dependencies", ["lag_1", "lag_2"]) lag_1 = dict() lag_2 = dict() - vanilla_params = {"name": "ansible_lag_1", - "location": "EqSe2", - "num_connections": 1, - "min_links": 0, - "bandwidth": "1Gbps"} + vanilla_params = { + "name": "ansible_lag_1", + "location": "EqSe2", + "num_connections": 1, + "min_links": 0, + "bandwidth": "1Gbps", + } for lag in ("ansible_lag_1", "ansible_lag_2"): params = dict(vanilla_params) @@ -49,10 +61,19 @@ def dependencies(): if os.getenv("PLACEBO_RECORD"): region, ec2_url, aws_connect_kwargs = get_aws_connection_info(lag_1["module"], boto3=True) - client = boto3_conn(lag_1["module"], conn_type="client", resource="directconnect", region=region, endpoint=ec2_url, **aws_connect_kwargs) + client = boto3_conn( + lag_1["module"], + conn_type="client", + resource="directconnect", + region=region, + endpoint=ec2_url, + **aws_connect_kwargs, + ) # See if link aggregation groups exist for name in ("ansible_lag_1", "ansible_lag_2"): - lag_id = lag_module.create_lag(client, num_connections=1, location="EqSe2", bandwidth="1Gbps", name=name, connection_id=None) + lag_id = lag_module.create_lag( + client, num_connections=1, location="EqSe2", bandwidth="1Gbps", name=name, connection_id=None + ) if name == "ansible_lag_1": lag_1["lag_id"] = lag_id lag_1["name"] = name @@ -87,10 +108,7 @@ class FakeModule(object): def test_nonexistent_lag_status(placeboify, maybe_sleep): client = placeboify.client("directconnect") - exists = lag_module.lag_exists(client=client, - lag_id="doesntexist", - lag_name="doesntexist", - verify=True) + exists = lag_module.lag_exists(client=client, lag_id="doesntexist", lag_name="doesntexist", verify=True) assert not exists @@ -103,28 +121,19 @@ def test_lag_status(placeboify, maybe_sleep, dependencies): def test_lag_exists(placeboify, maybe_sleep, dependencies): client = placeboify.client("directconnect") - exists = lag_module.lag_exists(client=client, - lag_id=dependencies.lag_1.get("lag_id"), - lag_name=None, - verify=True) + exists = lag_module.lag_exists(client=client, lag_id=dependencies.lag_1.get("lag_id"), lag_name=None, verify=True) assert exists def test_lag_exists_using_name(placeboify, maybe_sleep, dependencies): client = placeboify.client("directconnect") - exists = lag_module.lag_exists(client=client, - lag_id=None, - lag_name=dependencies.lag_1.get("name"), - verify=True) + exists = lag_module.lag_exists(client=client, lag_id=None, lag_name=dependencies.lag_1.get("name"), verify=True) assert exists def test_nonexistent_lag_does_not_exist(placeboify, maybe_sleep): client = placeboify.client("directconnect") - exists = lag_module.lag_exists(client=client, - 
lag_id="dxlag-XXXXXXXX", - lag_name="doesntexist", - verify=True) + exists = lag_module.lag_exists(client=client, lag_id="dxlag-XXXXXXXX", lag_name="doesntexist", verify=True) assert not exists @@ -143,19 +152,21 @@ def test_lag_changed_true_no(placeboify, maybe_sleep, dependencies): def test_update_lag(placeboify, maybe_sleep, dependencies): client = placeboify.client("directconnect") status_before = lag_module.lag_status(client=client, lag_id=dependencies.lag_2.get("lag_id")) - lag_module.update_lag(client, - lag_id=dependencies.lag_2.get("lag_id"), - lag_name="ansible_lag_2_update", - min_links=0, - wait=False, - wait_timeout=0, - num_connections=1) + lag_module.update_lag( + client, + lag_id=dependencies.lag_2.get("lag_id"), + lag_name="ansible_lag_2_update", + min_links=0, + wait=False, + wait_timeout=0, + num_connections=1, + ) status_after = lag_module.lag_status(client=client, lag_id=dependencies.lag_2.get("lag_id")) assert status_before != status_after # remove the lag name from the statuses and verify it was the only thing changed - del status_before['lagName'] - del status_after['lagName'] + del status_before["lagName"] + del status_after["lagName"] assert status_before == status_after diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_virtual_interface.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_virtual_interface.py index 4f0086421..62b511bde 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_virtual_interface.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_directconnect_virtual_interface.py @@ -4,20 +4,31 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type import pytest from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 + # Magic... 
Incorrectly identified by pylint as unused -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import +# isort: off +# pylint: disable=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify + +# pylint: enable=unused-import +# isort: on from ansible_collections.community.aws.plugins.modules import directconnect_virtual_interface if not HAS_BOTO3: - pytestmark = pytest.mark.skip("test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules") + pytestmark = pytest.mark.skip( + "test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules" + ) class FailException(Exception): @@ -46,10 +57,7 @@ def test_find_unique_vi_by_connection_id(placeboify, maybe_sleep): def test_find_unique_vi_by_vi_id(placeboify, maybe_sleep): client = placeboify.client("directconnect") - vi_id = directconnect_virtual_interface.find_unique_vi(client, - None, - "dxvif-aaaaaaaaa", - None) + vi_id = directconnect_virtual_interface.find_unique_vi(client, None, "dxvif-aaaaaaaaa", None) assert vi_id == "dxvif-aaaaaaaa" @@ -61,47 +69,38 @@ def test_find_unique_vi_by_name(placeboify, maybe_sleep): def test_find_unique_vi_returns_multiple(placeboify, maybe_sleep): client = placeboify.client("directconnect") - module = FakeModule(state="present", - id_to_associate="dxcon-aaaaaaaa", - public=False, - name=None) + module = FakeModule(state="present", id_to_associate="dxcon-aaaaaaaa", public=False, name=None) with pytest.raises(FailException): - directconnect_virtual_interface.ensure_state( - client, - module - ) + directconnect_virtual_interface.ensure_state(client, module) assert "Multiple virtual interfaces were found" in module.exit_kwargs["msg"] def test_find_unique_vi_returns_missing_for_vi_id(placeboify, maybe_sleep): client = placeboify.client("directconnect") - module = FakeModule(state="present", - id_to_associate=None, - public=False, - name=None, - virtual_interface_id="dxvif-aaaaaaaa") + module = FakeModule( + state="present", id_to_associate=None, public=False, name=None, virtual_interface_id="dxvif-aaaaaaaa" + ) with pytest.raises(FailException): - directconnect_virtual_interface.ensure_state( - client, - module - ) + directconnect_virtual_interface.ensure_state(client, module) assert "The virtual interface dxvif-aaaaaaaa does not exist" in module.exit_kwargs["msg"] def test_construct_public_vi(): - module = FakeModule(state="present", - id_to_associate=None, - public=True, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id="xxxx", - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="present", + id_to_associate=None, + public=True, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id="xxxx", + direct_connect_gateway_id="yyyy", + ) vi = directconnect_virtual_interface.assemble_params_for_creating_vi(module.params) assert vi == { "virtualInterfaceName": "aaaaaaaa", @@ -111,24 +110,26 @@ def 
test_construct_public_vi(): "amazonAddress": "169.254.0.2/30", "customerAddress": "169.254.0.1/30", "addressFamily": "ipv4", - "routeFilterPrefixes": [{"cidr": "10.88.0.0/30"}] + "routeFilterPrefixes": [{"cidr": "10.88.0.0/30"}], } def test_construct_private_vi_with_virtual_gateway_id(): - module = FakeModule(state="present", - id_to_associate=None, - public=False, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id="xxxx", - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="present", + id_to_associate=None, + public=False, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id="xxxx", + direct_connect_gateway_id="yyyy", + ) vi = directconnect_virtual_interface.assemble_params_for_creating_vi(module.params) assert vi == { "virtualInterfaceName": "aaaaaaaa", @@ -138,24 +139,26 @@ def test_construct_private_vi_with_virtual_gateway_id(): "amazonAddress": "169.254.0.2/30", "customerAddress": "169.254.0.1/30", "addressFamily": "ipv4", - "virtualGatewayId": "xxxx" + "virtualGatewayId": "xxxx", } def test_construct_private_vi_with_direct_connect_gateway_id(): - module = FakeModule(state="present", - id_to_associate=None, - public=False, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id=None, - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="present", + id_to_associate=None, + public=False, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id=None, + direct_connect_gateway_id="yyyy", + ) vi = directconnect_virtual_interface.assemble_params_for_creating_vi(module.params) print(vi) assert vi == { @@ -166,26 +169,28 @@ def test_construct_private_vi_with_direct_connect_gateway_id(): "amazonAddress": "169.254.0.2/30", "customerAddress": "169.254.0.1/30", "addressFamily": "ipv4", - "directConnectGatewayId": "yyyy" + "directConnectGatewayId": "yyyy", } def test_create_public_vi(placeboify, maybe_sleep): client = placeboify.client("directconnect") - module = FakeModule(state="present", - id_to_associate='dxcon-aaaaaaaa', - virtual_interface_id=None, - public=True, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id="xxxx", - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="present", + id_to_associate="dxcon-aaaaaaaa", + virtual_interface_id=None, + public=True, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id="xxxx", + direct_connect_gateway_id="yyyy", + ) changed, latest_state = directconnect_virtual_interface.ensure_state(client, module) assert changed is True assert latest_state is not None @@ -193,20 +198,22 @@ def test_create_public_vi(placeboify, maybe_sleep): def 
test_create_private_vi(placeboify, maybe_sleep): client = placeboify.client("directconnect") - module = FakeModule(state="present", - id_to_associate='dxcon-aaaaaaaa', - virtual_interface_id=None, - public=False, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id="xxxx", - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="present", + id_to_associate="dxcon-aaaaaaaa", + virtual_interface_id=None, + public=False, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id="xxxx", + direct_connect_gateway_id="yyyy", + ) changed, latest_state = directconnect_virtual_interface.ensure_state(client, module) assert changed is True assert latest_state is not None @@ -214,20 +221,22 @@ def test_create_private_vi(placeboify, maybe_sleep): def test_delete_vi(placeboify, maybe_sleep): client = placeboify.client("directconnect") - module = FakeModule(state="absent", - id_to_associate='dxcon-aaaaaaaa', - virtual_interface_id='dxvif-aaaaaaaa', - public=False, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id=None, - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="absent", + id_to_associate="dxcon-aaaaaaaa", + virtual_interface_id="dxvif-aaaaaaaa", + public=False, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id=None, + direct_connect_gateway_id="yyyy", + ) changed, latest_state = directconnect_virtual_interface.ensure_state(client, module) assert changed is True assert latest_state == {} diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_ec2_vpc_vpn.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_ec2_vpc_vpn.py index 88a1aea83..2b5db4226 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_ec2_vpc_vpn.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_ec2_vpc_vpn.py @@ -1,21 +1,29 @@ # (c) 2017 Red Hat Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type import os -import pytest -# Magic... 
Incorrectly identified by pylint as unused -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import +import pytest -import ansible_collections.amazon.aws.plugins.module_utils.modules as aws_modules import ansible_collections.amazon.aws.plugins.module_utils.retries as aws_retries -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info + +# Magic... Incorrectly identified by pylint as unused +# isort: off +# pylint: disable=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify + +# pylint: enable=unused-import +# isort: on from ansible_collections.community.aws.plugins.modules import ec2_vpc_vpn @@ -31,12 +39,12 @@ class FakeModule(object): def fail_json_aws(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise FailException('FAIL') + raise FailException("FAIL") def fail_json(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise FailException('FAIL') + raise FailException("FAIL") def exit_json(self, *args, **kwargs): self.exit_args = args @@ -45,36 +53,44 @@ class FakeModule(object): def get_vgw(connection): # see if two vgw exist and return them if so - vgw = connection.describe_vpn_gateways(Filters=[{'Name': 'tag:Ansible_VPN', 'Values': ['Test']}]) - if len(vgw['VpnGateways']) >= 2: - return [vgw['VpnGateways'][0]['VpnGatewayId'], vgw['VpnGateways'][1]['VpnGatewayId']] + vgw = connection.describe_vpn_gateways(Filters=[{"Name": "tag:Ansible_VPN", "Values": ["Test"]}]) + if len(vgw["VpnGateways"]) >= 2: + return [vgw["VpnGateways"][0]["VpnGatewayId"], vgw["VpnGateways"][1]["VpnGatewayId"]] # otherwise create two and return them - vgw_1 = connection.create_vpn_gateway(Type='ipsec.1') - vgw_2 = connection.create_vpn_gateway(Type='ipsec.1') + vgw_1 = connection.create_vpn_gateway(Type="ipsec.1") + vgw_2 = connection.create_vpn_gateway(Type="ipsec.1") for resource in (vgw_1, vgw_2): - connection.create_tags(Resources=[resource['VpnGateway']['VpnGatewayId']], Tags=[{'Key': 'Ansible_VPN', 'Value': 'Test'}]) - return [vgw_1['VpnGateway']['VpnGatewayId'], vgw_2['VpnGateway']['VpnGatewayId']] + connection.create_tags( + Resources=[resource["VpnGateway"]["VpnGatewayId"]], Tags=[{"Key": "Ansible_VPN", "Value": "Test"}] + ) + return [vgw_1["VpnGateway"]["VpnGatewayId"], vgw_2["VpnGateway"]["VpnGatewayId"]] def get_cgw(connection): # see if two cgw exist and return them if so - cgw = connection.describe_customer_gateways(DryRun=False, Filters=[{'Name': 'state', 'Values': ['available']}, - {'Name': 'tag:Name', 'Values': ['Ansible-CGW']}]) - if len(cgw['CustomerGateways']) >= 2: - return [cgw['CustomerGateways'][0]['CustomerGatewayId'], cgw['CustomerGateways'][1]['CustomerGatewayId']] + cgw = connection.describe_customer_gateways( + DryRun=False, + Filters=[{"Name": "state", "Values": ["available"]}, {"Name": "tag:Name", "Values": ["Ansible-CGW"]}], + ) + if len(cgw["CustomerGateways"]) >= 2: + return 
[cgw["CustomerGateways"][0]["CustomerGatewayId"], cgw["CustomerGateways"][1]["CustomerGatewayId"]] # otherwise create and return them - cgw_1 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='9.8.7.6', BgpAsn=65000) - cgw_2 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='5.4.3.2', BgpAsn=65000) + cgw_1 = connection.create_customer_gateway(DryRun=False, Type="ipsec.1", PublicIp="9.8.7.6", BgpAsn=65000) + cgw_2 = connection.create_customer_gateway(DryRun=False, Type="ipsec.1", PublicIp="5.4.3.2", BgpAsn=65000) for resource in (cgw_1, cgw_2): - connection.create_tags(Resources=[resource['CustomerGateway']['CustomerGatewayId']], Tags=[{'Key': 'Ansible-CGW', 'Value': 'Test'}]) - return [cgw_1['CustomerGateway']['CustomerGatewayId'], cgw_2['CustomerGateway']['CustomerGatewayId']] + connection.create_tags( + Resources=[resource["CustomerGateway"]["CustomerGatewayId"]], Tags=[{"Key": "Ansible-CGW", "Value": "Test"}] + ) + return [cgw_1["CustomerGateway"]["CustomerGatewayId"], cgw_2["CustomerGateway"]["CustomerGatewayId"]] def get_dependencies(): - if os.getenv('PLACEBO_RECORD'): + if os.getenv("PLACEBO_RECORD"): module = FakeModule(**{}) region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) + connection = boto3_conn( + module, conn_type="client", resource="ec2", region=region, endpoint=ec2_url, **aws_connect_kwargs + ) vgw = get_vgw(connection) cgw = get_cgw(connection) else: @@ -85,9 +101,9 @@ def get_dependencies(): def setup_mod_conn(placeboify, params): - conn = placeboify.client('ec2') + conn = placeboify.client("ec2") retry_decorator = aws_retries.AWSRetry.jittered_backoff() - wrapped_conn = aws_modules._RetryingBotoClientWrapper(conn, retry_decorator) + wrapped_conn = aws_retries.RetryingBotoClientWrapper(conn, retry_decorator) m = FakeModule(**params) return m, wrapped_conn @@ -97,23 +113,25 @@ def make_params(cgw, vgw, tags=None, filters=None, routes=None): filters = {} if filters is None else filters routes = [] if routes is None else routes - return {'customer_gateway_id': cgw, - 'static_only': True, - 'vpn_gateway_id': vgw, - 'connection_type': 'ipsec.1', - 'purge_tags': True, - 'tags': tags, - 'filters': filters, - 'routes': routes, - 'delay': 15, - 'wait_timeout': 600} + return { + "customer_gateway_id": cgw, + "static_only": True, + "vpn_gateway_id": vgw, + "connection_type": "ipsec.1", + "purge_tags": True, + "tags": tags, + "filters": filters, + "routes": routes, + "delay": 15, + "wait_timeout": 600, + } def make_conn(placeboify, module, connection): - customer_gateway_id = module.params['customer_gateway_id'] - static_only = module.params['static_only'] - vpn_gateway_id = module.params['vpn_gateway_id'] - connection_type = module.params['connection_type'] + customer_gateway_id = module.params["customer_gateway_id"] + static_only = module.params["static_only"] + vpn_gateway_id = module.params["vpn_gateway_id"] + connection_type = module.params["connection_type"] changed = True vpn = ec2_vpc_vpn.create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type) return changed, vpn @@ -124,7 +142,7 @@ def tear_down_conn(placeboify, connection, vpn_connection_id): def setup_req(placeboify, number_of_results=1): - ''' returns dependencies for VPN connections ''' + """returns dependencies for VPN connections""" assert number_of_results in (1, 2) 
results = [] cgw, vgw = get_dependencies() @@ -133,7 +151,7 @@ def setup_req(placeboify, number_of_results=1): m, conn = setup_mod_conn(placeboify, params) vpn = ec2_vpc_vpn.ensure_present(conn, params)[1] - results.append({'module': m, 'connection': conn, 'vpn': vpn, 'params': params}) + results.append({"module": m, "connection": conn, "vpn": vpn, "params": params}) if number_of_results == 1: return results[0] else: @@ -144,41 +162,44 @@ def test_find_connection_vpc_conn_id(placeboify, maybe_sleep): # setup dependencies for 2 vpn connections dependencies = setup_req(placeboify, 2) dep1, dep2 = dependencies[0], dependencies[1] - params1, vpn1, _m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection'] - _params2, vpn2, _m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection'] + params1, vpn1, _m1, conn1 = dep1["params"], dep1["vpn"], dep1["module"], dep1["connection"] + _params2, vpn2, _m2, conn2 = dep2["params"], dep2["vpn"], dep2["module"], dep2["connection"] # find the connection with a vpn_connection_id and assert it is the expected one - assert vpn1['VpnConnectionId'] == ec2_vpc_vpn.find_connection(conn1, params1, vpn1['VpnConnectionId'])['VpnConnectionId'] + assert ( + vpn1["VpnConnectionId"] + == ec2_vpc_vpn.find_connection(conn1, params1, vpn1["VpnConnectionId"])["VpnConnectionId"] + ) - tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId']) - tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId']) + tear_down_conn(placeboify, conn1, vpn1["VpnConnectionId"]) + tear_down_conn(placeboify, conn2, vpn2["VpnConnectionId"]) def test_find_connection_filters(placeboify, maybe_sleep): # setup dependencies for 2 vpn connections dependencies = setup_req(placeboify, 2) dep1, dep2 = dependencies[0], dependencies[1] - params1, vpn1, _m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection'] - params2, vpn2, _m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection'] + params1, vpn1, _m1, conn1 = dep1["params"], dep1["vpn"], dep1["module"], dep1["connection"] + params2, vpn2, _m2, conn2 = dep2["params"], dep2["vpn"], dep2["module"], dep2["connection"] # update to different tags - params1.update(tags={'Wrong': 'Tag'}) - params2.update(tags={'Correct': 'Tag'}) + params1.update(tags={"Wrong": "Tag"}) + params2.update(tags={"Correct": "Tag"}) ec2_vpc_vpn.ensure_present(conn1, params1) ec2_vpc_vpn.ensure_present(conn2, params2) # create some new parameters for a filter - params = {'filters': {'tags': {'Correct': 'Tag'}}} + params = {"filters": {"tags": {"Correct": "Tag"}}} # find the connection that has the parameters above found = ec2_vpc_vpn.find_connection(conn1, params) # assert the correct connection was found - assert found['VpnConnectionId'] == vpn2['VpnConnectionId'] + assert found["VpnConnectionId"] == vpn2["VpnConnectionId"] # delete the connections - tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId']) - tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId']) + tear_down_conn(placeboify, conn1, vpn1["VpnConnectionId"]) + tear_down_conn(placeboify, conn2, vpn2["VpnConnectionId"]) def test_find_connection_insufficient_filters(placeboify, maybe_sleep): @@ -186,15 +207,15 @@ def test_find_connection_insufficient_filters(placeboify, maybe_sleep): cgw, vgw = get_dependencies() # create two connections with the same tags - params = make_params(cgw[0], vgw[0], tags={'Correct': 'Tag'}) - params2 = make_params(cgw[1], vgw[1], tags={'Correct': 'Tag'}) + params = make_params(cgw[0], vgw[0], tags={"Correct": "Tag"}) 
+ params2 = make_params(cgw[1], vgw[1], tags={"Correct": "Tag"}) m, conn = setup_mod_conn(placeboify, params) m2, conn2 = setup_mod_conn(placeboify, params2) vpn1 = ec2_vpc_vpn.ensure_present(conn, m.params)[1] vpn2 = ec2_vpc_vpn.ensure_present(conn2, m2.params)[1] # reset the parameters so only filtering by tags will occur - m.params = {'filters': {'tags': {'Correct': 'Tag'}}} + m.params = {"filters": {"tags": {"Correct": "Tag"}}} expected_message = "More than one matching VPN connection was found" # assert that multiple matching connections have been found @@ -202,13 +223,13 @@ def test_find_connection_insufficient_filters(placeboify, maybe_sleep): ec2_vpc_vpn.find_connection(conn, m.params) # delete the connections - tear_down_conn(placeboify, conn, vpn1['VpnConnectionId']) - tear_down_conn(placeboify, conn, vpn2['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn1["VpnConnectionId"]) + tear_down_conn(placeboify, conn, vpn2["VpnConnectionId"]) def test_find_connection_nonexistent(placeboify, maybe_sleep): # create parameters but don't create a connection with them - params = {'filters': {'tags': {'Correct': 'Tag'}}} + params = {"filters": {"tags": {"Correct": "Tag"}}} m, conn = setup_mod_conn(placeboify, params) # try to find a connection with matching parameters and assert None are found @@ -226,38 +247,48 @@ def test_create_connection(placeboify, maybe_sleep): # assert that changed is true and that there is a connection id assert changed is True - assert 'VpnConnectionId' in vpn + assert "VpnConnectionId" in vpn # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_create_connection_that_exists(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - params, vpn, _m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + params, vpn, _m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # try to recreate the same connection changed, vpn2 = ec2_vpc_vpn.ensure_present(conn, params) # nothing should have changed assert changed is False - assert vpn['VpnConnectionId'] == vpn2['VpnConnectionId'] + assert vpn["VpnConnectionId"] == vpn2["VpnConnectionId"] # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_modify_deleted_connection(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - _params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + _params, vpn, m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # delete it - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) # try to update the deleted connection - m.params.update(vpn_connection_id=vpn['VpnConnectionId']) + m.params.update(vpn_connection_id=vpn["VpnConnectionId"]) expected_message = "no VPN connection available or pending with that id" with pytest.raises(ec2_vpc_vpn.VPNConnectionException, match=expected_message): ec2_vpc_vpn.ensure_present(conn, m.params) @@ -266,7 +297,12 @@ def test_modify_deleted_connection(placeboify, maybe_sleep): def test_delete_connection(placeboify, maybe_sleep): # setup dependencies for 1 vpn 
connection dependencies = setup_req(placeboify, 1) - _params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + _params, vpn, m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # delete it changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params) @@ -277,7 +313,7 @@ def test_delete_connection(placeboify, maybe_sleep): def test_delete_nonexistent_connection(placeboify, maybe_sleep): # create parameters and ensure any connection matching (None) is deleted - params = {'filters': {'tags': {'ThisConnection': 'DoesntExist'}}, 'delay': 15, 'wait_timeout': 600} + params = {"filters": {"tags": {"ThisConnection": "DoesntExist"}}, "delay": 15, "wait_timeout": 600} m, conn = setup_mod_conn(placeboify, params) changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params) @@ -288,83 +324,112 @@ def test_delete_nonexistent_connection(placeboify, maybe_sleep): def test_check_for_update_tags(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - _params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + _params, vpn, m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # add and remove a number of tags - m.params['tags'] = {'One': 'one', 'Two': 'two'} + m.params["tags"] = {"One": "one", "Two": "two"} ec2_vpc_vpn.ensure_present(conn, m.params) - m.params['tags'] = {'Two': 'two', 'Three': 'three', 'Four': 'four'} - changes = ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId']) + m.params["tags"] = {"Two": "two", "Three": "three", "Four": "four"} + changes = ec2_vpc_vpn.check_for_update(conn, m.params, vpn["VpnConnectionId"]) - flat_dict_changes = boto3_tag_list_to_ansible_dict(changes['tags_to_add']) - correct_changes = boto3_tag_list_to_ansible_dict([{'Key': 'Three', 'Value': 'three'}, {'Key': 'Four', 'Value': 'four'}]) + flat_dict_changes = boto3_tag_list_to_ansible_dict(changes["tags_to_add"]) + correct_changes = boto3_tag_list_to_ansible_dict( + [{"Key": "Three", "Value": "three"}, {"Key": "Four", "Value": "four"}] + ) assert flat_dict_changes == correct_changes - assert changes['tags_to_remove'] == ['One'] + assert changes["tags_to_remove"] == ["One"] # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_check_for_update_nonmodifiable_attr(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] - current_vgw = params['vpn_gateway_id'] + params, vpn, m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) + current_vgw = params["vpn_gateway_id"] # update a parameter that isn't modifiable m.params.update(vpn_gateway_id="invalidchange") - expected_message = 'You cannot modify vpn_gateway_id, the current value of which is {0}. Modifiable VPN connection attributes are'.format(current_vgw) + expected_message = f"You cannot modify vpn_gateway_id, the current value of which is {current_vgw}. 
Modifiable VPN connection attributes are" with pytest.raises(ec2_vpc_vpn.VPNConnectionException, match=expected_message): - ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId']) + ec2_vpc_vpn.check_for_update(conn, m.params, vpn["VpnConnectionId"]) # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_add_tags(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - params, vpn, _m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + params, vpn, _m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # add a tag to the connection - ec2_vpc_vpn.add_tags(conn, vpn['VpnConnectionId'], add=[{'Key': 'Ansible-Test', 'Value': 'VPN'}]) + ec2_vpc_vpn.add_tags(conn, vpn["VpnConnectionId"], add=[{"Key": "Ansible-Test", "Value": "VPN"}]) # assert tag is there current_vpn = ec2_vpc_vpn.find_connection(conn, params) - assert current_vpn['Tags'] == [{'Key': 'Ansible-Test', 'Value': 'VPN'}] + assert current_vpn["Tags"] == [{"Key": "Ansible-Test", "Value": "VPN"}] # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_remove_tags(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - params, vpn, _m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + params, vpn, _m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # remove a tag from the connection - ec2_vpc_vpn.remove_tags(conn, vpn['VpnConnectionId'], remove=['Ansible-Test']) + ec2_vpc_vpn.remove_tags(conn, vpn["VpnConnectionId"], remove=["Ansible-Test"]) # assert the tag is gone current_vpn = ec2_vpc_vpn.find_connection(conn, params) - assert 'Tags' not in current_vpn + assert "Tags" not in current_vpn # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_add_routes(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - params, vpn, _m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + params, vpn, _m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # create connection with a route - ec2_vpc_vpn.add_routes(conn, vpn['VpnConnectionId'], ['195.168.2.0/24', '196.168.2.0/24']) + ec2_vpc_vpn.add_routes(conn, vpn["VpnConnectionId"], ["195.168.2.0/24", "196.168.2.0/24"]) # assert both routes are there current_vpn = ec2_vpc_vpn.find_connection(conn, params) - assert set(each['DestinationCidrBlock'] for each in current_vpn['Routes']) == set(['195.168.2.0/24', '196.168.2.0/24']) + assert set(each["DestinationCidrBlock"] for each in current_vpn["Routes"]) == set( + ["195.168.2.0/24", "196.168.2.0/24"] + ) # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_ec2_win_password.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_ec2_win_password.py index 939620120..7f832aa71 
100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_ec2_win_password.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_ec2_win_password.py @@ -1,8 +1,4 @@ -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -''' +""" Commands to encrypt a message that can be decrypted: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.serialization import load_pem_private_key @@ -15,9 +11,11 @@ with open(path, 'r') as f: load_pem_public_key(rsa_public_key_pem = , default_backend()) base64_cipher = public_key.encrypt('Ansible_AWS_EC2_Win_Password', PKCS1v15()) string_cipher = base64.b64encode(base64_cipher) -''' +""" import datetime +from unittest.mock import patch + import pytest from ansible.module_utils._text import to_bytes @@ -25,52 +23,53 @@ from ansible.module_utils._text import to_text from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 -from ansible_collections.community.aws.tests.unit.compat.mock import patch +from ansible_collections.community.aws.plugins.modules.ec2_win_password import ec2_win_password +from ansible_collections.community.aws.plugins.modules.ec2_win_password import setup_module_object from ansible_collections.community.aws.tests.unit.plugins.modules.utils import AnsibleExitJson from ansible_collections.community.aws.tests.unit.plugins.modules.utils import ModuleTestCase from ansible_collections.community.aws.tests.unit.plugins.modules.utils import set_module_args -from ansible_collections.community.aws.plugins.modules.ec2_win_password import setup_module_object -from ansible_collections.community.aws.plugins.modules.ec2_win_password import ec2_win_password - -fixture_prefix = 'tests/unit/plugins/modules/fixtures/certs' +fixture_prefix = "tests/unit/plugins/modules/fixtures/certs" if not HAS_BOTO3: pytestmark = pytest.mark.skip("test_api_gateway.py requires the `boto3` and `botocore` modules") class TestEc2WinPasswordModule(ModuleTestCase): - # Future: It would be good to generate this data on the fly and use a # temporary certificate and password. 
- PEM_PATH = fixture_prefix + '/ec2_win_password.pem' - UNENCRYPTED_DATA = 'Ansible_AWS_EC2_Win_Password' - ENCRYPTED_DATA = 'L2k1iFiu/TRrjGr6Rwco/T3C7xkWxUw4+YPYpGGOmP3KDdy3hT1' \ - '8RvdDJ2i0e+y7wUcH43DwbRYSlkSyALY/nzjSV9R5NChUyVs3W5' \ - '5oiVuyTKsk0lor8dFJ9z9unq14tScZHvyQ3Nx1ggOtS18S9Pk55q' \ - 'IaCXfx26ucH76VRho=' - INSTANCE_ID = 'i-12345' - - @patch('ansible_collections.community.aws.plugins.modules.s3_bucket_notification.AnsibleAWSModule.client') + PEM_PATH = fixture_prefix + "/ec2_win_password.pem" + UNENCRYPTED_DATA = "Ansible_AWS_EC2_Win_Password" + ENCRYPTED_DATA = ( + "L2k1iFiu/TRrjGr6Rwco/T3C7xkWxUw4+YPYpGGOmP3KDdy3hT1" + "8RvdDJ2i0e+y7wUcH43DwbRYSlkSyALY/nzjSV9R5NChUyVs3W5" + "5oiVuyTKsk0lor8dFJ9z9unq14tScZHvyQ3Nx1ggOtS18S9Pk55q" + "IaCXfx26ucH76VRho=" + ) + INSTANCE_ID = "i-12345" + + @patch("ansible_collections.community.aws.plugins.modules.s3_bucket_notification.AnsibleAWSModule.client") def test_decryption(self, mock_client): - path = self.PEM_PATH - with open(path, 'r') as f: + with open(path, "r") as f: pem = to_text(f.read()) with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({'instance_id': self.INSTANCE_ID, - 'key_data': pem, - }) + set_module_args( + { + "instance_id": self.INSTANCE_ID, + "key_data": pem, + } + ) module = setup_module_object() mock_client().get_password_data.return_value = { - 'InstanceId': self.INSTANCE_ID, - 'PasswordData': self.ENCRYPTED_DATA, - 'Timestamp': datetime.datetime.now(), + "InstanceId": self.INSTANCE_ID, + "PasswordData": self.ENCRYPTED_DATA, + "Timestamp": datetime.datetime.now(), } ec2_win_password(module) self.assertEqual( - exec_info.exception.args[0]['win_password'], + exec_info.exception.args[0]["win_password"], to_bytes(self.UNENCRYPTED_DATA), ) diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_iam_password_policy.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_iam_password_policy.py deleted file mode 100644 index 11de7f477..000000000 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_iam_password_policy.py +++ /dev/null @@ -1,30 +0,0 @@ -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import pytest - -from ansible_collections.community.aws.tests.unit.plugins.modules.utils import set_module_args - -from ansible_collections.community.aws.plugins.modules import iam_password_policy - - -def test_warn_if_state_not_specified(capsys): - set_module_args({ - "min_pw_length": "8", - "require_symbols": "false", - "require_numbers": "true", - "require_uppercase": "true", - "require_lowercase": "true", - "allow_pw_change": "true", - "pw_max_age": "60", - "pw_reuse_prevent": "5", - "pw_expire": "false" - }) - with pytest.raises(SystemExit): - iam_password_policy.main() - captured = capsys.readouterr() - - output = json.loads(captured.out) - assert 'missing required arguments' in output.get('msg', '') diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_opensearch.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_opensearch.py index 836e2cf07..7dcd785c9 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_opensearch.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_opensearch.py @@ -1,86 +1,85 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, 
division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type import functools -from ansible_collections.community.aws.plugins.module_utils.opensearch import ( - compare_domain_versions, - parse_version, -) + +from ansible_collections.community.aws.plugins.module_utils.opensearch import compare_domain_versions +from ansible_collections.community.aws.plugins.module_utils.opensearch import parse_version def test_parse_version(): test_versions = [ - ['Elasticsearch_5.5', {'engine_type': 'Elasticsearch', 'major': 5, 'minor': 5}], - ['Elasticsearch_7.1', {'engine_type': 'Elasticsearch', 'major': 7, 'minor': 1}], - ['Elasticsearch_7.10', {'engine_type': 'Elasticsearch', 'major': 7, 'minor': 10}], - ['OpenSearch_1.0', {'engine_type': 'OpenSearch', 'major': 1, 'minor': 0}], - ['OpenSearch_1.1', {'engine_type': 'OpenSearch', 'major': 1, 'minor': 1}], - ['OpenSearch_a.b', None], - ['OpenSearch_1.b', None], - ['OpenSearch_1-1', None], - ['OpenSearch_1.1.2', None], - ['OpenSearch_foo_1.1', None], - ['OpenSearch_1', None], - ['OpenSearch-1.0', None], - ['Foo_1.0', None], + ["Elasticsearch_5.5", {"engine_type": "Elasticsearch", "major": 5, "minor": 5}], + ["Elasticsearch_7.1", {"engine_type": "Elasticsearch", "major": 7, "minor": 1}], + ["Elasticsearch_7.10", {"engine_type": "Elasticsearch", "major": 7, "minor": 10}], + ["OpenSearch_1.0", {"engine_type": "OpenSearch", "major": 1, "minor": 0}], + ["OpenSearch_1.1", {"engine_type": "OpenSearch", "major": 1, "minor": 1}], + ["OpenSearch_a.b", None], + ["OpenSearch_1.b", None], + ["OpenSearch_1-1", None], + ["OpenSearch_1.1.2", None], + ["OpenSearch_foo_1.1", None], + ["OpenSearch_1", None], + ["OpenSearch-1.0", None], + ["Foo_1.0", None], ] for expected in test_versions: ret = parse_version(expected[0]) if ret != expected[1]: - raise AssertionError( - f"parse_version({expected[0]} returned {ret}, expected {expected[1]}") + raise AssertionError(f"parse_version({expected[0]} returned {ret}, expected {expected[1]}") def test_version_compare(): test_versions = [ - ['Elasticsearch_5.5', 'Elasticsearch_5.5', 0], - ['Elasticsearch_5.5', 'Elasticsearch_7.1', -1], - ['Elasticsearch_7.1', 'Elasticsearch_7.1', 0], - ['Elasticsearch_7.1', 'Elasticsearch_7.2', -1], - ['Elasticsearch_7.1', 'Elasticsearch_7.10', -1], - ['Elasticsearch_7.2', 'Elasticsearch_7.10', -1], - ['Elasticsearch_7.10', 'Elasticsearch_7.2', 1], - ['Elasticsearch_7.2', 'Elasticsearch_5.5', 1], - ['Elasticsearch_7.2', 'OpenSearch_1.0', -1], - ['Elasticsearch_7.2', 'OpenSearch_1.1', -1], - ['OpenSearch_1.1', 'OpenSearch_1.1', 0], - ['OpenSearch_1.0', 'OpenSearch_1.1', -1], - ['OpenSearch_1.1', 'OpenSearch_1.0', 1], - ['foo_1.1', 'OpenSearch_1.0', -1], - ['Elasticsearch_5.5', 'foo_1.0', 1], + ["Elasticsearch_5.5", "Elasticsearch_5.5", 0], + ["Elasticsearch_5.5", "Elasticsearch_7.1", -1], + ["Elasticsearch_7.1", "Elasticsearch_7.1", 0], + ["Elasticsearch_7.1", "Elasticsearch_7.2", -1], + ["Elasticsearch_7.1", "Elasticsearch_7.10", -1], + ["Elasticsearch_7.2", "Elasticsearch_7.10", -1], + ["Elasticsearch_7.10", "Elasticsearch_7.2", 1], + ["Elasticsearch_7.2", "Elasticsearch_5.5", 1], + ["Elasticsearch_7.2", "OpenSearch_1.0", -1], + ["Elasticsearch_7.2", "OpenSearch_1.1", -1], + ["OpenSearch_1.1", "OpenSearch_1.1", 0], + ["OpenSearch_1.0", "OpenSearch_1.1", -1], + ["OpenSearch_1.1", "OpenSearch_1.0", 1], + ["foo_1.1", "OpenSearch_1.0", -1], + ["Elasticsearch_5.5", "foo_1.0", 1], ] for v in test_versions: ret 
= compare_domain_versions(v[0], v[1]) if ret != v[2]: - raise AssertionError( - f"compare({v[0]}, {v[1]} returned {ret}, expected {v[2]}") + raise AssertionError(f"compare({v[0]}, {v[1]} returned {ret}, expected {v[2]}") def test_sort_versions(): input_versions = [ - 'Elasticsearch_5.6', - 'Elasticsearch_5.5', - 'Elasticsearch_7.10', - 'Elasticsearch_7.2', - 'foo_10.5', - 'OpenSearch_1.1', - 'OpenSearch_1.0', - 'Elasticsearch_7.3', + "Elasticsearch_5.6", + "Elasticsearch_5.5", + "Elasticsearch_7.10", + "Elasticsearch_7.2", + "foo_10.5", + "OpenSearch_1.1", + "OpenSearch_1.0", + "Elasticsearch_7.3", ] expected_versions = [ - 'foo_10.5', - 'Elasticsearch_5.5', - 'Elasticsearch_5.6', - 'Elasticsearch_7.2', - 'Elasticsearch_7.3', - 'Elasticsearch_7.10', - 'OpenSearch_1.0', - 'OpenSearch_1.1', + "foo_10.5", + "Elasticsearch_5.5", + "Elasticsearch_5.6", + "Elasticsearch_7.2", + "Elasticsearch_7.3", + "Elasticsearch_7.10", + "OpenSearch_1.0", + "OpenSearch_1.1", ] input_versions = sorted(input_versions, key=functools.cmp_to_key(compare_domain_versions)) if input_versions != expected_versions: - raise AssertionError( - f"Expected {expected_versions}, got {input_versions}") + raise AssertionError(f"Expected {expected_versions}, got {input_versions}") diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_redshift_cross_region_snapshots.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_redshift_cross_region_snapshots.py index 7b22d5b00..1342a8d58 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/test_redshift_cross_region_snapshots.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_redshift_cross_region_snapshots.py @@ -1,40 +1,41 @@ # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type from ansible_collections.community.aws.plugins.modules import redshift_cross_region_snapshots as rcrs mock_status_enabled = { - 'SnapshotCopyGrantName': 'snapshot-us-east-1-to-us-west-2', - 'DestinationRegion': 'us-west-2', - 'RetentionPeriod': 1, + "SnapshotCopyGrantName": "snapshot-us-east-1-to-us-west-2", + "DestinationRegion": "us-west-2", + "RetentionPeriod": 1, } mock_status_disabled = {} mock_request_illegal = { - 'snapshot_copy_grant': 'changed', - 'destination_region': 'us-west-2', - 'snapshot_retention_period': 1 + "snapshot_copy_grant": "changed", + "destination_region": "us-west-2", + "snapshot_retention_period": 1, } mock_request_update = { - 'snapshot_copy_grant': 'snapshot-us-east-1-to-us-west-2', - 'destination_region': 'us-west-2', - 'snapshot_retention_period': 3 + "snapshot_copy_grant": "snapshot-us-east-1-to-us-west-2", + "destination_region": "us-west-2", + "snapshot_retention_period": 3, } mock_request_no_update = { - 'snapshot_copy_grant': 'snapshot-us-east-1-to-us-west-2', - 'destination_region': 'us-west-2', - 'snapshot_retention_period': 1 + "snapshot_copy_grant": "snapshot-us-east-1-to-us-west-2", + "destination_region": "us-west-2", + "snapshot_retention_period": 1, } def test_fail_at_unsupported_operations(): - response = rcrs.requesting_unsupported_modifications( - mock_status_enabled, mock_request_illegal - ) + response = rcrs.requesting_unsupported_modifications(mock_status_enabled, mock_request_illegal) assert response is True @@ -44,9 +45,7 @@ def test_needs_update_true(): def test_no_change(): - response = 
rcrs.requesting_unsupported_modifications( - mock_status_enabled, mock_request_no_update - ) + response = rcrs.requesting_unsupported_modifications(mock_status_enabled, mock_request_no_update) needs_update_response = rcrs.needs_update(mock_status_enabled, mock_request_no_update) assert response is False assert needs_update_response is False diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_route53_wait.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_route53_wait.py new file mode 100644 index 000000000..57ed705c5 --- /dev/null +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_route53_wait.py @@ -0,0 +1,240 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2023, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import pytest + +from ansible_collections.community.aws.plugins.modules.route53_wait import detect_task_results + +_SINGLE_RESULT_SUCCESS = { + "changed": True, + "diff": {}, + "failed": False, + "wait_id": None, +} + +_SINGLE_RESULT_FAILED = { + "changed": False, + "failed": True, + "msg": "value of type must be one of: A, AAAA, CAA, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT, got: bar", +} + +_MULTI_RESULT_SUCCESS = { + "ansible_loop_var": "item", + "changed": True, + "diff": {}, + "failed": False, + "invocation": { + "module_args": { + "access_key": "asdf", + "alias": None, + "alias_evaluate_target_health": False, + "alias_hosted_zone_id": None, + "aws_access_key": "asdf", + "aws_ca_bundle": None, + "aws_config": None, + "aws_secret_key": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "debug_botocore_endpoint_logs": False, + "endpoint_url": None, + "failover": None, + "geo_location": None, + "health_check": None, + "hosted_zone_id": None, + "identifier": None, + "overwrite": True, + "private_zone": False, + "profile": None, + "record": "foo.example.org", + "region": None, + "retry_interval": 500, + "secret_key": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "session_token": None, + "state": "present", + "ttl": 300, + "type": "TXT", + "validate_certs": True, + "value": ["foo"], + "vpc_id": None, + "wait": False, + "wait_timeout": 300, + "weight": None, + "zone": "example.org", + }, + }, + "item": {"record": "foo.example.org", "value": "foo"}, + "wait_id": None, +} + +_MULTI_RESULT_FAILED = { + "ansible_loop_var": "item", + "changed": False, + "failed": True, + "invocation": { + "module_args": { + "access_key": "asdf", + "alias": None, + "alias_evaluate_target_health": False, + "alias_hosted_zone_id": None, + "aws_access_key": "asdf", + "aws_ca_bundle": None, + "aws_config": None, + "aws_secret_key": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "debug_botocore_endpoint_logs": False, + "endpoint_url": None, + "failover": None, + "geo_location": None, + "health_check": None, + "hosted_zone_id": None, + "identifier": None, + "overwrite": True, + "private_zone": False, + "profile": None, + "record": "foo.example.org", + "region": None, + "retry_interval": 500, + "secret_key": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "session_token": None, + "state": "present", + "ttl": 300, + "type": "bar", + "validate_certs": True, + "value": ["foo"], + "vpc_id": None, + "wait": False, + "wait_timeout": 300, + "weight": None, + "zone": "example.org", + }, + }, + "item": {"record": "foo.example.org", "value": "foo"}, + "msg": "value of type must be one of: A, AAAA, CAA, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT, got: bar", +} + + +DETECT_TASK_RESULTS_DATA = [ + [ + _SINGLE_RESULT_SUCCESS, + [ 
+ ( + "", + _SINGLE_RESULT_SUCCESS, + ), + ], + ], + [ + { + "changed": True, + "msg": "All items completed", + "results": [ + _MULTI_RESULT_SUCCESS, + ], + "skipped": False, + }, + [ + ( + " for result #1", + _MULTI_RESULT_SUCCESS, + ), + ], + ], + [ + _SINGLE_RESULT_FAILED, + [ + ( + "", + _SINGLE_RESULT_FAILED, + ), + ], + ], + [ + { + "changed": False, + "failed": True, + "msg": "One or more items failed", + "results": [ + _MULTI_RESULT_FAILED, + ], + "skipped": False, + }, + [ + ( + " for result #1", + _MULTI_RESULT_FAILED, + ), + ], + ], +] + + +@pytest.mark.parametrize( + "input, expected", + DETECT_TASK_RESULTS_DATA, +) +def test_detect_task_results(input, expected): + assert list(detect_task_results(input)) == expected + + +DETECT_TASK_RESULTS_FAIL_DATA = [ + [ + {}, + "missing changed key", + [], + ], + [ + {"changed": True}, + "missing failed key", + [], + ], + [ + {"results": None}, + "missing changed key", + [], + ], + [ + {"results": None, "changed": True, "msg": "foo"}, + "missing skipped key", + [], + ], + [ + {"results": None, "changed": True, "msg": "foo", "skipped": False}, + "results is present, but not a list", + [], + ], + [ + {"results": [None], "changed": True, "msg": "foo", "skipped": False}, + "result 1 is not a dictionary", + [], + ], + [ + {"results": [{}], "changed": True, "msg": "foo", "skipped": False}, + "missing changed key for result 1", + [], + ], + [ + { + "results": [{"changed": True, "failed": False, "ansible_loop_var": "item", "invocation": {}}, {}], + "changed": True, + "msg": "foo", + "skipped": False, + }, + "missing changed key for result 2", + [(" for result #1", {"changed": True, "failed": False, "ansible_loop_var": "item", "invocation": {}})], + ], +] + + +@pytest.mark.parametrize( + "input, expected_exc, expected_result", + DETECT_TASK_RESULTS_FAIL_DATA, +) +def test_detect_task_fail_results(input, expected_exc, expected_result): + result = [] + with pytest.raises(ValueError) as exc: + for res in detect_task_results(input): + result.append(res) + + print(exc.value.args[0]) + assert expected_exc == exc.value.args[0] + print(result) + assert expected_result == result diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/test_ssm_inventory_info.py b/ansible_collections/community/aws/tests/unit/plugins/modules/test_ssm_inventory_info.py new file mode 100644 index 000000000..518a11a3b --- /dev/null +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/test_ssm_inventory_info.py @@ -0,0 +1,117 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock +from unittest.mock import patch + +import pytest +from botocore.exceptions import BotoCoreError + +from ansible_collections.community.aws.plugins.modules.ssm_inventory_info import SsmInventoryInfoFailure +from ansible_collections.community.aws.plugins.modules.ssm_inventory_info import execute_module +from ansible_collections.community.aws.plugins.modules.ssm_inventory_info import get_ssm_inventory + + +def test_get_ssm_inventory(): + connection = MagicMock() + inventory_response = MagicMock() + connection.get_inventory.return_value = inventory_response + filters = MagicMock() + + assert get_ssm_inventory(connection, filters) == inventory_response + connection.get_inventory.assert_called_once_with(Filters=filters) + + +def test_get_ssm_inventory_failure(): + connection = MagicMock() + 
connection.get_inventory.side_effect = BotoCoreError(error="failed", operation="get_ssm_inventory") + filters = MagicMock() + + with pytest.raises(SsmInventoryInfoFailure): + get_ssm_inventory(connection, filters) + + +@patch("ansible_collections.community.aws.plugins.modules.ssm_inventory_info.get_ssm_inventory") +def test_execute_module(m_get_ssm_inventory): + instance_id = "i-0202020202020202" + aws_inventory = { + "AgentType": "amazon-ssm-agent", + "AgentVersion": "3.2.582.0", + "ComputerName": "ip-172-31-44-166.ec2.internal", + "InstanceId": "i-039eb9b1f55934ab6", + "InstanceStatus": "Active", + "IpAddress": "172.31.44.166", + "PlatformName": "Fedora Linux", + "PlatformType": "Linux", + "PlatformVersion": "37", + "ResourceType": "EC2Instance", + } + + ansible_inventory = { + "agent_type": "amazon-ssm-agent", + "agent_version": "3.2.582.0", + "computer_name": "ip-172-31-44-166.ec2.internal", + "instance_id": "i-039eb9b1f55934ab6", + "instance_status": "Active", + "ip_address": "172.31.44.166", + "platform_name": "Fedora Linux", + "platform_type": "Linux", + "platform_version": "37", + "resource_type": "EC2Instance", + } + + m_get_ssm_inventory.return_value = { + "Entities": [{"Id": instance_id, "Data": {"AWS:InstanceInformation": {"Content": [aws_inventory]}}}], + "Status": 200, + } + + connection = MagicMock() + module = MagicMock() + module.params = dict(instance_id=instance_id) + module.exit_json.side_effect = SystemExit(1) + module.fail_json_aws.side_effect = SystemError(2) + + with pytest.raises(SystemExit): + execute_module(module, connection) + + module.exit_json.assert_called_once_with(changed=False, ssm_inventory=ansible_inventory) + + +@patch("ansible_collections.community.aws.plugins.modules.ssm_inventory_info.get_ssm_inventory") +def test_execute_module_no_data(m_get_ssm_inventory): + instance_id = "i-0202020202020202" + + m_get_ssm_inventory.return_value = { + "Entities": [{"Id": instance_id, "Data": {}}], + } + + connection = MagicMock() + module = MagicMock() + module.params = dict(instance_id=instance_id) + module.exit_json.side_effect = SystemExit(1) + module.fail_json_aws.side_effect = SystemError(2) + + with pytest.raises(SystemExit): + execute_module(module, connection) + + module.exit_json.assert_called_once_with(changed=False, ssm_inventory={}) + + +@patch("ansible_collections.community.aws.plugins.modules.ssm_inventory_info.get_ssm_inventory") +def test_execute_module_failure(m_get_ssm_inventory): + instance_id = "i-0202020202020202" + + m_get_ssm_inventory.side_effect = SsmInventoryInfoFailure( + exc=BotoCoreError(error="failed", operation="get_ssm_inventory"), msg="get_ssm_inventory() failed." 
+ ) + + connection = MagicMock() + module = MagicMock() + module.params = dict(instance_id=instance_id) + module.exit_json.side_effect = SystemExit(1) + module.fail_json_aws.side_effect = SystemError(2) + + with pytest.raises(SystemError): + execute_module(module, connection) diff --git a/ansible_collections/community/aws/tests/unit/plugins/modules/utils.py b/ansible_collections/community/aws/tests/unit/plugins/modules/utils.py index 026bf2549..a3d9e31db 100644 --- a/ansible_collections/community/aws/tests/unit/plugins/modules/utils.py +++ b/ansible_collections/community/aws/tests/unit/plugins/modules/utils.py @@ -1,23 +1,20 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import json +import unittest +from unittest.mock import patch -from ansible_collections.community.aws.tests.unit.compat import unittest -from ansible_collections.community.aws.tests.unit.compat.mock import patch from ansible.module_utils import basic from ansible.module_utils._text import to_bytes def set_module_args(args): - if '_ansible_remote_tmp' not in args: - args['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in args: - args['_ansible_keep_remote_files'] = False + if "_ansible_remote_tmp" not in args: + args["_ansible_remote_tmp"] = "/tmp" + if "_ansible_keep_remote_files" not in args: + args["_ansible_keep_remote_files"] = False - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) basic._ANSIBLE_ARGS = to_bytes(args) @@ -30,22 +27,21 @@ class AnsibleFailJson(Exception): def exit_json(*args, **kwargs): - if 'changed' not in kwargs: - kwargs['changed'] = False + if "changed" not in kwargs: + kwargs["changed"] = False raise AnsibleExitJson(kwargs) def fail_json(*args, **kwargs): - kwargs['failed'] = True + kwargs["failed"] = True raise AnsibleFailJson(kwargs) class ModuleTestCase(unittest.TestCase): - def setUp(self): self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) self.mock_module.start() - self.mock_sleep = patch('time.sleep') + self.mock_sleep = patch("time.sleep") self.mock_sleep.start() set_module_args({}) self.addCleanup(self.mock_module.stop) diff --git a/ansible_collections/community/aws/tests/unit/requirements.yml b/ansible_collections/community/aws/tests/unit/requirements.yml new file mode 100644 index 000000000..99ce82a1b --- /dev/null +++ b/ansible_collections/community/aws/tests/unit/requirements.yml @@ -0,0 +1,5 @@ +--- +collections: + - name: https://github.com/ansible-collections/amazon.aws.git + type: git + version: main diff --git a/ansible_collections/community/aws/tox.ini b/ansible_collections/community/aws/tox.ini new file mode 100644 index 000000000..179ed761c --- /dev/null +++ b/ansible_collections/community/aws/tox.ini @@ -0,0 +1,104 @@ +[tox] +skipsdist = True +envlist = clean,ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_constraints},linters +# Tox4 supports labels which allow us to group the environments rather than dumping all commands into a single environment +labels = + format = flynt, black, isort + lint = complexity-report, ansible-lint, black-lint, isort-lint, flake8-lint, flynt-lint + units = ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_constraints} + +[common] +format_dirs = {toxinidir}/plugins {toxinidir}/tests + +[testenv] +description = Run the test-suite and generate a HTML coverage report +deps 
=
+    pytest
+    pytest-cov
+    ansible2.12: ansible-core>2.12,<2.13
+    ansible2.13: ansible-core>2.13,<2.14
+    !ansible2.12-!ansible2.13: ansible-core
+    pytest-ansible
+    -rtest-requirements.txt
+    with_constraints: -rtests/unit/constraints.txt
+commands = pytest --cov-report html --cov plugins/callback --cov plugins/inventory --cov plugins/lookup --cov plugins/module_utils --cov plugins/modules --cov plugins/plugin_utils plugins {posargs:tests/}
+
+[testenv:clean]
+deps = coverage
+skip_install = true
+commands = coverage erase
+
+[testenv:complexity-report]
+description = Generate a HTML complexity report in the complexity directory
+deps =
+    # See: https://github.com/lordmauve/flake8-html/issues/30
+    flake8>=3.3.0,<5.0.0
+    flake8-html
+commands = -flake8 --select C90 --max-complexity 10 --format=html --htmldir={posargs:complexity} plugins
+
+[testenv:ansible-lint]
+deps =
+    ansible-lint
+commands =
+    ansible-lint {toxinidir}/plugins
+
+[testenv:black]
+depends =
+    flynt, isort
+deps =
+    black >=23.0, <24.0
+commands =
+    black {[common]format_dirs}
+
+[testenv:black-lint]
+deps =
+    {[testenv:black]deps}
+commands =
+    black -v --check --diff {[common]format_dirs}
+
+[testenv:isort]
+deps =
+    isort
+commands =
+    isort {[common]format_dirs}
+
+[testenv:isort-lint]
+deps =
+    {[testenv:isort]deps}
+commands =
+    isort --check-only --diff {[common]format_dirs}
+
+[testenv:flake8-lint]
+deps =
+    flake8
+commands =
+    flake8 {posargs} {[common]format_dirs}
+
+[testenv:flynt]
+deps =
+    flynt
+commands =
+    flynt {[common]format_dirs}
+
+[testenv:flynt-lint]
+deps =
+    flynt
+commands =
+    flynt --dry-run {[common]format_dirs}
+
+[testenv:linters]
+deps =
+    {[testenv:black]deps}
+    {[testenv:isort]deps}
+    flake8
+commands =
+    black -v --check {toxinidir}/plugins {toxinidir}/tests
+    isort --check-only --diff {toxinidir}/plugins {toxinidir}/tests
+    flake8 {posargs} {toxinidir}/plugins {toxinidir}/tests
+
+[flake8]
+# E123, E125 skipped as they are invalid PEP-8.
+show-source = True
+ignore = E123,E125,E203,E402,E501,E741,F401,F811,F841,W503
+max-line-length = 160
+builtins = _
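The `placeboify` and `maybe_sleep` imports that recur throughout the test diffs above (now fenced with `# isort: off` / `# pylint: disable=unused-import`) are pytest fixtures: pytest resolves them by parameter name at collection time, so the imports carry no direct references and pylint mistakes them for unused. A minimal sketch of the record/replay pattern behind them, assuming the real `placebo` library and a hypothetical `tests/fixtures` response directory (the actual `amazon_placebo_fixtures` helpers additionally support recording via `PLACEBO_RECORD` and handle credentials):

import boto3
import placebo
import pytest


@pytest.fixture
def placeboify():
    """Yield a boto3 session whose clients replay canned JSON responses."""
    session = boto3.Session(region_name="us-east-1")
    # Attach placebo to the session; responses are read from data_path.
    pill = placebo.attach(session, data_path="tests/fixtures")  # hypothetical path
    pill.playback()  # replay recorded responses instead of calling AWS
    yield session
    pill.stop()


def test_connection_status(placeboify):
    # pytest injects the fixture by its parameter name -- this indirect use
    # is why the fixture imports in the diffs above look "unused" to pylint.
    client = placeboify.client("directconnect")
    # Assumes a recorded DescribeConnections response exists in data_path.
    connections = client.describe_connections()["connections"]
    assert isinstance(connections, list)

Because recorded responses stand in for live AWS calls, these unit tests run without credentials; the tox 4 labels defined above (for example `tox -m units`) or plain `pytest` can then exercise them offline.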