Diffstat (limited to 'collections-debian-merged/ansible_collections/amazon/aws/plugins/modules'): 40 files changed, 17310 insertions(+), 0 deletions(-)
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/__init__.py diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_facts.py new file mode 100644 index 00000000..42f12323 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_facts.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +module: aws_az_info +short_description: Gather information about availability zones in AWS. +version_added: 1.0.0 +description: + - Gather information about availability zones in AWS. + - This module was called M(amazon.aws.aws_az_facts) before Ansible 2.9. The usage did not change. +author: 'Henrique Rodrigues (@Sodki)' +options: + filters: + description: + - A dict of filters to apply. + - Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for possible filters. + - Filter names and values are case sensitive. + - You can use underscores instead of dashes (-) in the filter keys. + - Filter keys with underscores will take precedence in case of conflict. + required: false + default: {} + type: dict +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +requirements: [botocore, boto3] +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Gather information about all availability zones + amazon.aws.aws_az_info: + +- name: Gather information about a single availability zone + amazon.aws.aws_az_info: + filters: + zone-name: eu-west-1a +''' + +RETURN = ''' +availability_zones: + returned: on success + description: > + Availability zones that match the provided filters. Each element consists of a dict with all the information + related to that available zone. 
+ type: list + sample: "[ + { + 'messages': [], + 'region_name': 'us-west-1', + 'state': 'available', + 'zone_name': 'us-west-1b' + }, + { + 'messages': [], + 'region_name': 'us-west-1', + 'state': 'available', + 'zone_name': 'us-west-1c' + } + ]" +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list + + +def main(): + argument_spec = dict( + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'aws_az_facts': + module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'", date='2022-06-01', collection_name='amazon.aws') + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + # Replace filter key underscores with dashes, for compatibility + sanitized_filters = dict(module.params.get('filters')) + for k in module.params.get('filters').keys(): + if "_" in k: + sanitized_filters[k.replace('_', '-')] = sanitized_filters[k] + del sanitized_filters[k] + + try: + availability_zones = connection.describe_availability_zones(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to describe availability zones.") + + # Turn the boto3 result into ansible_friendly_snaked_names + snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']] + + module.exit_json(availability_zones=snaked_availability_zones) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py new file mode 100644 index 00000000..42f12323 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +module: aws_az_info +short_description: Gather information about availability zones in AWS. +version_added: 1.0.0 +description: + - Gather information about availability zones in AWS. + - This module was called M(amazon.aws.aws_az_facts) before Ansible 2.9. The usage did not change. +author: 'Henrique Rodrigues (@Sodki)' +options: + filters: + description: + - A dict of filters to apply. + - Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for possible filters. + - Filter names and values are case sensitive. + - You can use underscores instead of dashes (-) in the filter keys. + - Filter keys with underscores will take precedence in case of conflict. + required: false + default: {} + type: dict +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +requirements: [botocore, boto3] +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
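# A filter key may also be written with underscores in place of dashes; the
# module rewrites such keys before calling the EC2 API. For example:
- name: Gather information about a single availability zone using an underscore filter key
  amazon.aws.aws_az_info:
    filters:
      zone_name: eu-west-1a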
+ +- name: Gather information about all availability zones + amazon.aws.aws_az_info: + +- name: Gather information about a single availability zone + amazon.aws.aws_az_info: + filters: + zone-name: eu-west-1a +''' + +RETURN = ''' +availability_zones: + returned: on success + description: > + Availability zones that match the provided filters. Each element consists of a dict with all the information + related to that available zone. + type: list + sample: "[ + { + 'messages': [], + 'region_name': 'us-west-1', + 'state': 'available', + 'zone_name': 'us-west-1b' + }, + { + 'messages': [], + 'region_name': 'us-west-1', + 'state': 'available', + 'zone_name': 'us-west-1c' + } + ]" +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list + + +def main(): + argument_spec = dict( + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'aws_az_facts': + module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'", date='2022-06-01', collection_name='amazon.aws') + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + # Replace filter key underscores with dashes, for compatibility + sanitized_filters = dict(module.params.get('filters')) + for k in module.params.get('filters').keys(): + if "_" in k: + sanitized_filters[k.replace('_', '-')] = sanitized_filters[k] + del sanitized_filters[k] + + try: + availability_zones = connection.describe_availability_zones(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to describe availability zones.") + + # Turn the boto3 result into ansible_friendly_snaked_names + snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']] + + module.exit_json(availability_zones=snaked_availability_zones) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_facts.py new file mode 100644 index 00000000..91880fdb --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_facts.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: aws_caller_info +version_added: 1.0.0 +short_description: Get information about the user and account being used to make AWS calls. +description: + - This module returns information about the account and user / role from which the AWS access tokens originate. + - The primary use of this is to get the account id for templating into ARNs or similar to avoid needing to specify this information in inventory. + - This module was called M(amazon.aws.aws_caller_facts) before Ansible 2.9. The usage did not change. 
+ +author: + - Ed Costello (@orthanc) + - Stijn Dubrul (@sdubrul) + +requirements: [ 'botocore', 'boto3' ] +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Get the current caller identity information + amazon.aws.aws_caller_info: + register: caller_info +''' + +RETURN = ''' +account: + description: The account id the access credentials are associated with. + returned: success + type: str + sample: "123456789012" +account_alias: + description: The account alias the access credentials are associated with. + returned: when caller has the iam:ListAccountAliases permission + type: str + sample: "acme-production" +arn: + description: The arn identifying the user the credentials are associated with. + returned: success + type: str + sample: arn:aws:sts::123456789012:federated-user/my-federated-user-name +user_id: + description: | + The user id the access credentials are associated with. Note that this may not correspond to + anything you can look up in the case of roles or federated identities. + returned: success + type: str + sample: 123456789012:my-federated-user-name +''' + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry + + +def main(): + module = AnsibleAWSModule( + argument_spec={}, + supports_check_mode=True, + ) + if module._name == 'aws_caller_facts': + module.deprecate("The 'aws_caller_facts' module has been renamed to 'aws_caller_info'", date='2021-12-01', collection_name='amazon.aws') + + client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) + + try: + caller_info = client.get_caller_identity(aws_retry=True) + caller_info.pop('ResponseMetadata', None) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to retrieve caller identity') + + iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + + try: + # Although a list is returned by list_account_aliases AWS supports maximum one alias per account. + # If an alias is defined it will be returned otherwise a blank string is filled in as account_alias. + # see https://docs.aws.amazon.com/cli/latest/reference/iam/list-account-aliases.html#output + response = iam_client.list_account_aliases(aws_retry=True) + if response and response['AccountAliases']: + caller_info['account_alias'] = response['AccountAliases'][0] + else: + caller_info['account_alias'] = '' + except (BotoCoreError, ClientError) as e: + # The iam:ListAccountAliases permission is required for this operation to succeed. + # Lacking this permission is handled gracefully by not returning the account_alias. 
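        # Deliberately swallow the exception: the STS identity gathered above is
        # still returned, just without the optional account_alias field.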
+ pass + + module.exit_json( + changed=False, + **camel_dict_to_snake_dict(caller_info)) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py new file mode 100644 index 00000000..91880fdb --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: aws_caller_info +version_added: 1.0.0 +short_description: Get information about the user and account being used to make AWS calls. +description: + - This module returns information about the account and user / role from which the AWS access tokens originate. + - The primary use of this is to get the account id for templating into ARNs or similar to avoid needing to specify this information in inventory. + - This module was called M(amazon.aws.aws_caller_facts) before Ansible 2.9. The usage did not change. + +author: + - Ed Costello (@orthanc) + - Stijn Dubrul (@sdubrul) + +requirements: [ 'botocore', 'boto3' ] +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Get the current caller identity information + amazon.aws.aws_caller_info: + register: caller_info +''' + +RETURN = ''' +account: + description: The account id the access credentials are associated with. + returned: success + type: str + sample: "123456789012" +account_alias: + description: The account alias the access credentials are associated with. + returned: when caller has the iam:ListAccountAliases permission + type: str + sample: "acme-production" +arn: + description: The arn identifying the user the credentials are associated with. + returned: success + type: str + sample: arn:aws:sts::123456789012:federated-user/my-federated-user-name +user_id: + description: | + The user id the access credentials are associated with. Note that this may not correspond to + anything you can look up in the case of roles or federated identities. 
+ returned: success + type: str + sample: 123456789012:my-federated-user-name +''' + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry + + +def main(): + module = AnsibleAWSModule( + argument_spec={}, + supports_check_mode=True, + ) + if module._name == 'aws_caller_facts': + module.deprecate("The 'aws_caller_facts' module has been renamed to 'aws_caller_info'", date='2021-12-01', collection_name='amazon.aws') + + client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) + + try: + caller_info = client.get_caller_identity(aws_retry=True) + caller_info.pop('ResponseMetadata', None) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to retrieve caller identity') + + iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + + try: + # Although a list is returned by list_account_aliases AWS supports maximum one alias per account. + # If an alias is defined it will be returned otherwise a blank string is filled in as account_alias. + # see https://docs.aws.amazon.com/cli/latest/reference/iam/list-account-aliases.html#output + response = iam_client.list_account_aliases(aws_retry=True) + if response and response['AccountAliases']: + caller_info['account_alias'] = response['AccountAliases'][0] + else: + caller_info['account_alias'] = '' + except (BotoCoreError, ClientError) as e: + # The iam:ListAccountAliases permission is required for this operation to succeed. + # Lacking this permission is handled gracefully by not returning the account_alias. + pass + + module.exit_json( + changed=False, + **camel_dict_to_snake_dict(caller_info)) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_s3.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_s3.py new file mode 100644 index 00000000..eb6d8b90 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_s3.py @@ -0,0 +1,947 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: aws_s3 +version_added: 1.0.0 +short_description: manage objects in S3. +description: + - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and + deleting both objects and buckets, retrieving objects as files or strings and generating download links. + This module has a dependency on boto3 and botocore. +options: + bucket: + description: + - Bucket name. + required: true + type: str + dest: + description: + - The destination file path when downloading an object/key with a GET operation. + type: path + encrypt: + description: + - When set for PUT mode, asks for server-side encryption. + default: true + type: bool + encryption_mode: + description: + - What encryption mode to use if I(encrypt=true). + default: AES256 + choices: + - AES256 + - aws:kms + type: str + expiry: + description: + - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation. 
+ default: 600 + aliases: ['expiration'] + type: int + headers: + description: + - Custom headers for PUT operation, as a dictionary of C(key=value) and C(key=value,key=value). + type: dict + marker: + description: + - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order. + type: str + max_keys: + description: + - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys. + default: 1000 + type: int + metadata: + description: + - Metadata for PUT operation, as a dictionary of C(key=value) and C(key=value,key=value). + type: dict + mode: + description: + - Switches the module behaviour between C(put) (upload), C(get) (download), C(geturl) (return download url, Ansible 1.3+), + C(getstr) (download object as string (1.3+)), C(list) (list keys, Ansible 2.0+), C(create) (bucket), C(delete) (bucket), + and delobj (delete object, Ansible 2.0+). + required: true + choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'] + type: str + object: + description: + - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. + type: str + permission: + description: + - This option lets the user set the canned permissions on the object/bucket that are created. + The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or + C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read), + C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list. + default: ['private'] + type: list + elements: str + prefix: + description: + - Limits the response to keys that begin with the specified prefix for list mode. + default: "" + type: str + version: + description: + - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket. + type: str + overwrite: + description: + - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. + - Must be a Boolean, C(always), C(never) or C(different). + - C(true) is the same as C(always). + - C(false) is equal to C(never). + - When this is set to C(different) the MD5 sum of the local file is compared with the 'ETag' of the object/key in S3. + The ETag may or may not be an MD5 digest of the object data. See the ETag response header here + U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html). + default: 'always' + aliases: ['force'] + type: str + retries: + description: + - On recoverable failure, how many times to retry before actually failing. + default: 0 + type: int + aliases: ['retry'] + s3_url: + description: + - S3 URL endpoint for usage with Ceph, Eucalyptus and fakes3 etc. Otherwise assumes AWS. + aliases: [ S3_URL ] + type: str + dualstack: + description: + - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6. + - Requires at least botocore version 1.4.45. + type: bool + default: false + rgw: + description: + - Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url). + default: false + type: bool + src: + description: + - The source file path when performing a PUT operation. + - Either I(content), I(content_base64) or I(src) must be specified for a PUT operation. Ignored otherwise. 
+ type: path + content: + description: + - The content to PUT into an object. + - The parameter value will be treated as a string and converted to UTF-8 before sending it to S3. + To send binary data, use the I(content_base64) parameter instead. + - Either I(content), I(content_base64) or I(src) must be specified for a PUT operation. Ignored otherwise. + version_added: "1.3.0" + type: str + content_base64: + description: + - The base64-encoded binary data to PUT into an object. + - Use this if you need to put raw binary data, and don't forget to encode in base64. + - Either I(content), I(content_base64) or I(src) must be specified for a PUT operation. Ignored otherwise. + version_added: "1.3.0" + type: str + ignore_nonexistent_bucket: + description: + - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the + GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying + I(ignore_nonexistent_bucket=true)." + type: bool + default: false + encryption_kms_key_id: + description: + - KMS key id to use when encrypting objects using I(encrypting=aws:kms). Ignored if I(encryption) is not C(aws:kms). + type: str +requirements: [ "boto3", "botocore" ] +author: + - "Lester Wade (@lwade)" + - "Sloane Hertel (@s-hertel)" +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +- name: Simple PUT operation + amazon.aws.aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + +- name: PUT operation from a rendered template + amazon.aws.aws_s3: + bucket: mybucket + object: /object.yaml + content: "{{ lookup('template', 'templates/object.yaml.j2') }}" + mode: put + +- name: Simple PUT operation in Ceph RGW S3 + amazon.aws.aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + rgw: true + s3_url: "http://localhost:8000" + +- name: Simple GET operation + amazon.aws.aws_s3: + bucket: mybucket + object: /my/desired/key.txt + dest: /usr/local/myfile.txt + mode: get + +- name: Get a specific version of an object. + amazon.aws.aws_s3: + bucket: mybucket + object: /my/desired/key.txt + version: 48c9ee5131af7a716edc22df9772aa6f + dest: /usr/local/myfile.txt + mode: get + +- name: PUT/upload with metadata + amazon.aws.aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + metadata: 'Content-Encoding=gzip,Cache-Control=no-cache' + +- name: PUT/upload with custom headers + amazon.aws.aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + headers: 'x-amz-grant-full-control=emailAddress=owner@example.com' + +- name: List keys simple + amazon.aws.aws_s3: + bucket: mybucket + mode: list + +- name: List keys all options + amazon.aws.aws_s3: + bucket: mybucket + mode: list + prefix: /my/desired/ + marker: /my/desired/0023.txt + max_keys: 472 + +- name: Create an empty bucket + amazon.aws.aws_s3: + bucket: mybucket + mode: create + permission: public-read + +- name: Create a bucket with key as directory, in the EU region + amazon.aws.aws_s3: + bucket: mybucket + object: /my/directory/path + mode: create + region: eu-west-1 + +- name: Delete a bucket and all contents + amazon.aws.aws_s3: + bucket: mybucket + mode: delete + +- name: GET an object but don't download if the file checksums match. 
New in 2.0 + amazon.aws.aws_s3: + bucket: mybucket + object: /my/desired/key.txt + dest: /usr/local/myfile.txt + mode: get + overwrite: different + +- name: Delete an object from a bucket + amazon.aws.aws_s3: + bucket: mybucket + object: /my/desired/key.txt + mode: delobj +''' + +RETURN = ''' +msg: + description: Message indicating the status of the operation. + returned: always + type: str + sample: PUT operation complete +url: + description: URL of the object. + returned: (for put and geturl operations) + type: str + sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature> +expiry: + description: Number of seconds the presigned url is valid for. + returned: (for geturl operation) + type: int + sample: 600 +contents: + description: Contents of the object as string. + returned: (for getstr operation) + type: str + sample: "Hello, world!" +s3_keys: + description: List of object keys. + returned: (for list operation) + type: list + elements: str + sample: + - prefix1/ + - prefix1/key1 + - prefix1/key2 +''' + +import mimetypes +import os +import io +from ssl import SSLError +import base64 + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.basic import to_text +from ansible.module_utils.basic import to_native +from ansible.module_utils.six.moves.urllib.parse import urlparse + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.core import is_boto3_error_code +from ..module_utils.core import is_boto3_error_message +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import boto3_conn +from ..module_utils.ec2 import get_aws_connection_info +from ..module_utils.s3 import HAS_MD5 +from ..module_utils.s3 import calculate_etag +from ..module_utils.s3 import calculate_etag_content + +IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented'] + + +class Sigv4Required(Exception): + pass + + +def key_check(module, s3, bucket, obj, version=None, validate=True): + try: + if version: + s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + else: + s3.head_object(Bucket=bucket, Key=obj) + except is_boto3_error_code('404'): + return False + except is_boto3_error_code('403') as e: + if validate is True: + module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj) + + return True + + +def etag_compare(module, s3, bucket, obj, version=None, local_file=None, content=None): + s3_etag = get_etag(s3, bucket, obj, version=version) + if local_file is not None: + local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version) + else: + local_etag = calculate_etag_content(module, content, s3_etag, s3, bucket, obj, version) + + return s3_etag == local_etag + + +def get_etag(s3, bucket, obj, version=None): + if version: + key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + else: + key_check = s3.head_object(Bucket=bucket, Key=obj) + if not key_check: + return None + return key_check['ETag'] + + +def bucket_check(module, s3, bucket, validate=True): + exists = True + try: + s3.head_bucket(Bucket=bucket) + except is_boto3_error_code('404'): + return False + except is_boto3_error_code('403') as e: + if validate is True: + module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." 
% bucket) + except botocore.exceptions.EndpointConnectionError as e: + module.fail_json_aws(e, msg="Invalid endpoint provided") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket) + return exists + + +def create_bucket(module, s3, bucket, location=None): + if module.check_mode: + module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True) + configuration = {} + if location not in ('us-east-1', None): + configuration['LocationConstraint'] = location + try: + if len(configuration) > 0: + s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration) + else: + s3.create_bucket(Bucket=bucket) + if module.params.get('permission'): + # Wait for the bucket to exist before setting ACLs + s3.get_waiter('bucket_exists').wait(Bucket=bucket) + for acl in module.params.get('permission'): + AWSRetry.jittered_backoff( + max_delay=120, catch_extra_error_codes=['NoSuchBucket'] + )(s3.put_bucket_acl)(ACL=acl, Bucket=bucket) + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): + module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).") + + if bucket: + return True + + +def paginated_list(s3, **pagination_params): + pg = s3.get_paginator('list_objects_v2') + for page in pg.paginate(**pagination_params): + yield [data['Key'] for data in page.get('Contents', [])] + + +def paginated_versioned_list_with_fallback(s3, **pagination_params): + try: + versioned_pg = s3.get_paginator('list_object_versions') + for page in versioned_pg.paginate(**pagination_params): + delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])] + current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])] + yield delete_markers + current_objects + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']): + for page in paginated_list(s3, **pagination_params): + yield [{'Key': data['Key']} for data in page] + + +def list_keys(module, s3, bucket, prefix, marker, max_keys): + pagination_params = {'Bucket': bucket} + for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)): + pagination_params[param_name] = param_value + try: + keys = sum(paginated_list(s3, **pagination_params), []) + module.exit_json(msg="LIST operation complete", s3_keys=keys) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket)) + + +def delete_bucket(module, s3, bucket): + if module.check_mode: + module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) + try: + exists = bucket_check(module, s3, bucket) + if exists is False: + return False + # if there are contents then we need to delete them before we can delete the bucket + for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket): + if keys: + s3.delete_objects(Bucket=bucket, Delete={'Objects': keys}) + s3.delete_bucket(Bucket=bucket) + return True + except is_boto3_error_code('NoSuchBucket'): + return False + except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket) + + +def delete_key(module, s3, bucket, obj): + if module.check_mode: + module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) + try: + s3.delete_object(Bucket=bucket, Key=obj) + module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj) + + +def create_dirkey(module, s3, bucket, obj, encrypt): + if module.check_mode: + module.exit_json(msg="PUT operation skipped - running in check mode", changed=True) + try: + params = {'Bucket': bucket, 'Key': obj, 'Body': b''} + if encrypt: + params['ServerSideEncryption'] = module.params['encryption_mode'] + if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': + params['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] + + s3.put_object(**params) + for acl in module.params.get('permission'): + s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj) + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): + module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed while creating object %s." % obj) + module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True) + + +def path_check(path): + if os.path.exists(path): + return True + else: + return False + + +def option_in_extra_args(option): + temp_option = option.replace('-', '').lower() + + allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition', + 'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage', + 'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl', + 'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP', + 'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption', + 'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey', + 'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'} + + if temp_option in allowed_extra_args: + return allowed_extra_args[temp_option] + + +def upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=None, content=None): + if module.check_mode: + module.exit_json(msg="PUT operation skipped - running in check mode", changed=True) + try: + extra = {} + if encrypt: + extra['ServerSideEncryption'] = module.params['encryption_mode'] + if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': + extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] + if metadata: + extra['Metadata'] = {} + + # determine object metadata and extra arguments + for option in metadata: + extra_args_option = option_in_extra_args(option) + if extra_args_option is not None: + extra[extra_args_option] = metadata[option] + else: + extra['Metadata'][option] = metadata[option] + + if 'ContentType' not in extra: + content_type = None + if src is not None: + content_type = 
mimetypes.guess_type(src)[0] + if content_type is None: + # s3 default content type + content_type = 'binary/octet-stream' + extra['ContentType'] = content_type + + if src is not None: + s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra) + else: + f = io.BytesIO(content) + s3.upload_fileobj(Fileobj=f, Bucket=bucket, Key=obj, ExtraArgs=extra) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to complete PUT operation.") + try: + for acl in module.params.get('permission'): + s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj) + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): + module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Unable to set object ACL") + try: + url = s3.generate_presigned_url(ClientMethod='put_object', + Params={'Bucket': bucket, 'Key': obj}, + ExpiresIn=expiry) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to generate presigned URL") + module.exit_json(msg="PUT operation complete", url=url, changed=True) + + +def download_s3file(module, s3, bucket, obj, dest, retries, version=None): + if module.check_mode: + module.exit_json(msg="GET operation skipped - running in check mode", changed=True) + # retries is the number of loops; range/xrange needs to be one + # more to get that count of loops. + try: + if version: + key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version) + else: + key = s3.get_object(Bucket=bucket, Key=obj) + except is_boto3_error_code(['404', '403']) as e: + # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but + # user does not have the s3:GetObject permission. 404 errors are handled by download_file(). + module.fail_json_aws(e, msg="Could not find the key %s." % obj) + except is_boto3_error_message('require AWS Signature Version 4'): + raise Sigv4Required() + except is_boto3_error_code('InvalidArgument') as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Could not find the key %s." % obj) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Could not find the key %s." % obj) + + optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {} + for x in range(0, retries + 1): + try: + s3.download_file(bucket, obj, dest, **optional_kwargs) + module.exit_json(msg="GET operation complete", changed=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + # actually fail on last pass through the loop. + if x >= retries: + module.fail_json_aws(e, msg="Failed while downloading %s." % obj) + # otherwise, try again, this may be a transient timeout. + except SSLError as e: # will ClientError catch SSLError? + # actually fail on last pass through the loop. + if x >= retries: + module.fail_json_aws(e, msg="s3 download failed") + # otherwise, try again, this may be a transient timeout. 
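# The bounded retry loop above can be read in isolation as the minimal sketch
# below; fetch() is a hypothetical stand-in for s3.download_file.
def retry_call(fetch, retries):
    # Try the call retries + 1 times; treat failures as transient and
    # re-raise only on the final attempt.
    for attempt in range(retries + 1):
        try:
            return fetch()
        except Exception:
            if attempt >= retries:
                raise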
+ + +def download_s3str(module, s3, bucket, obj, version=None, validate=True): + if module.check_mode: + module.exit_json(msg="GET operation skipped - running in check mode", changed=True) + try: + if version: + contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read()) + else: + contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read()) + module.exit_json(msg="GET operation complete", contents=contents, changed=True) + except is_boto3_error_message('require AWS Signature Version 4'): + raise Sigv4Required() + except is_boto3_error_code('InvalidArgument') as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj) + + +def get_download_url(module, s3, bucket, obj, expiry, changed=True): + try: + url = s3.generate_presigned_url(ClientMethod='get_object', + Params={'Bucket': bucket, 'Key': obj}, + ExpiresIn=expiry) + module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while getting download url.") + + +def is_fakes3(s3_url): + """ Return True if s3_url has scheme fakes3:// """ + if s3_url is not None: + return urlparse(s3_url).scheme in ('fakes3', 'fakes3s') + else: + return False + + +def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False): + if s3_url and rgw: # TODO - test this + rgw = urlparse(s3_url) + params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs) + elif is_fakes3(s3_url): + fakes3 = urlparse(s3_url) + port = fakes3.port + if fakes3.scheme == 'fakes3s': + protocol = "https" + if port is None: + port = 443 + else: + protocol = "http" + if port is None: + port = 80 + params = dict(module=module, conn_type='client', resource='s3', region=location, + endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), + use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) + else: + params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs) + if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms': + params['config'] = botocore.client.Config(signature_version='s3v4') + elif module.params['mode'] in ('get', 'getstr') and sig_4: + params['config'] = botocore.client.Config(signature_version='s3v4') + if module.params['dualstack']: + dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True}) + if 'config' in params: + params['config'] = params['config'].merge(dualconf) + else: + params['config'] = dualconf + return boto3_conn(**params) + + +def main(): + argument_spec = dict( + bucket=dict(required=True), + dest=dict(default=None, type='path'), + encrypt=dict(default=True, type='bool'), + encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'), + expiry=dict(default=600, type='int', aliases=['expiration']), + headers=dict(type='dict'), + marker=dict(default=""), + max_keys=dict(default=1000, type='int'), + metadata=dict(type='dict'), + mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True), + object=dict(), + 
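        # 'object' is the S3 key name; a leading '/' is stripped further down for
        # backwards compatibility with Ansible versions < 2.4.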
permission=dict(type='list', elements='str', default=['private']), + version=dict(default=None), + overwrite=dict(aliases=['force'], default='always'), + prefix=dict(default=""), + retries=dict(aliases=['retry'], type='int', default=0), + s3_url=dict(aliases=['S3_URL']), + dualstack=dict(default='no', type='bool'), + rgw=dict(default='no', type='bool'), + src=dict(type='path'), + content=dict(), + content_base64=dict(), + ignore_nonexistent_bucket=dict(default=False, type='bool'), + encryption_kms_key_id=dict() + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[['mode', 'put', ['object']], + ['mode', 'get', ['dest', 'object']], + ['mode', 'getstr', ['object']], + ['mode', 'geturl', ['object']]], + mutually_exclusive=[['content', 'content_base64', 'src']], + ) + + bucket = module.params.get('bucket') + encrypt = module.params.get('encrypt') + expiry = module.params.get('expiry') + dest = module.params.get('dest', '') + headers = module.params.get('headers') + marker = module.params.get('marker') + max_keys = module.params.get('max_keys') + metadata = module.params.get('metadata') + mode = module.params.get('mode') + obj = module.params.get('object') + version = module.params.get('version') + overwrite = module.params.get('overwrite') + prefix = module.params.get('prefix') + retries = module.params.get('retries') + s3_url = module.params.get('s3_url') + dualstack = module.params.get('dualstack') + rgw = module.params.get('rgw') + src = module.params.get('src') + content = module.params.get('content') + content_base64 = module.params.get('content_base64') + ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket') + + object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"] + bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"] + + if overwrite not in ['always', 'never', 'different']: + if module.boolean(overwrite): + overwrite = 'always' + else: + overwrite = 'never' + + if overwrite == 'different' and not HAS_MD5: + module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + + if region in ('us-east-1', '', None): + # default to US Standard region + location = 'us-east-1' + else: + # Boto uses symbolic names for locations but region strings will + # actually work fine for everything except us-east-1 (US Standard) + location = region + + if module.params.get('object'): + obj = module.params['object'] + # If there is a top level object, do nothing - if the object starts with / + # remove the leading character to maintain compatibility with Ansible versions < 2.4 + if obj.startswith('/'): + obj = obj[1:] + + # Bucket deletion does not require obj. Prevents ambiguity with delobj. 
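    # (mode=delete removes an entire bucket; accepting an object key here would
    # blur the line with mode=delobj, so the combination is rejected outright.)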
+ if obj and mode == "delete": + module.fail_json(msg='Parameter obj cannot be used with mode=delete') + + # allow eucarc environment variables to be used if ansible vars aren't set + if not s3_url and 'S3_URL' in os.environ: + s3_url = os.environ['S3_URL'] + + if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url: + module.fail_json(msg='dualstack only applies to AWS S3') + + if dualstack and not module.botocore_at_least('1.4.45'): + module.fail_json(msg='dualstack requires botocore >= 1.4.45') + + # rgw requires an explicit url + if rgw and not s3_url: + module.fail_json(msg='rgw flavour requires s3_url') + + # Look at s3_url and tweak connection settings + # if connecting to RGW, Walrus or fakes3 + if s3_url: + for key in ['validate_certs', 'security_token', 'profile_name']: + aws_connect_kwargs.pop(key, None) + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url) + + validate = not ignore_nonexistent_bucket + + # separate types of ACLs + bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl] + object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl] + error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl] + if error_acl: + module.fail_json(msg='Unknown permission specified: %s' % error_acl) + + # First, we check to see if the bucket exists, we get "bucket" returned. + bucketrtn = bucket_check(module, s3, bucket, validate=validate) + + if validate and mode not in ('create', 'put', 'delete') and not bucketrtn: + module.fail_json(msg="Source bucket cannot be found.") + + if mode == 'get': + keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) + if keyrtn is False: + if version: + module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version)) + else: + module.fail_json(msg="Key %s does not exist." % obj) + + if dest and path_check(dest) and overwrite != 'always': + if overwrite == 'never': + module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False) + if etag_compare(module, s3, bucket, obj, version=version, local_file=dest): + module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False) + + try: + download_s3file(module, s3, bucket, obj, dest, retries, version=version) + except Sigv4Required: + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True) + download_s3file(module, s3, bucket, obj, dest, retries, version=version) + + if mode == 'put': + + # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified + # these were separated into the variables bucket_acl and object_acl above + + if content is None and content_base64 is None and src is None: + module.fail_json('Either content, content_base64 or src must be specified for PUT operations') + if src is not None and not path_check(src): + module.fail_json('Local object "%s" does not exist for PUT operation' % (src)) + + if bucketrtn: + keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) + else: + # If the bucket doesn't exist we should create it. 
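            # (create_bucket below also applies the bucket ACLs, waiting for the
            # new bucket to exist before setting them.)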
+ # only use valid bucket acls for create_bucket function + module.params['permission'] = bucket_acl + create_bucket(module, s3, bucket, location) + + # the content will be uploaded as a byte string, so we must encode it first + bincontent = None + if content is not None: + bincontent = content.encode('utf-8') + if content_base64 is not None: + bincontent = base64.standard_b64decode(content_base64) + + if keyrtn and overwrite != 'always': + if overwrite == 'never' or etag_compare(module, s3, bucket, obj, version=version, local_file=src, content=bincontent): + # Return the download URL for the existing object + get_download_url(module, s3, bucket, obj, expiry, changed=False) + + # only use valid object acls for the upload_s3file function + module.params['permission'] = object_acl + upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=src, content=bincontent) + + # Delete an object from a bucket, not the entire bucket + if mode == 'delobj': + if obj is None: + module.fail_json(msg="object parameter is required") + if bucket: + deletertn = delete_key(module, s3, bucket, obj) + if deletertn is True: + module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True) + else: + module.fail_json(msg="Bucket parameter is required.") + + # Delete an entire bucket, including all objects in the bucket + if mode == 'delete': + if bucket: + deletertn = delete_bucket(module, s3, bucket) + if deletertn is True: + module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True) + else: + module.fail_json(msg="Bucket parameter is required.") + + # Support for listing a set of keys + if mode == 'list': + exists = bucket_check(module, s3, bucket) + + # If the bucket does not exist then bail out + if not exists: + module.fail_json(msg="Target bucket (%s) cannot be found" % bucket) + + list_keys(module, s3, bucket, prefix, marker, max_keys) + + # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. + # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. + if mode == 'create': + + # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified + # these were separated above into the variables bucket_acl and object_acl + + if bucket and not obj: + if bucketrtn: + module.exit_json(msg="Bucket already exists.", changed=False) + else: + # only use valid bucket acls when creating the bucket + module.params['permission'] = bucket_acl + module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location)) + if bucket and obj: + if obj.endswith('/'): + dirobj = obj + else: + dirobj = obj + "/" + if bucketrtn: + if key_check(module, s3, bucket, dirobj): + module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False) + else: + # setting valid object acls for the create_dirkey function + module.params['permission'] = object_acl + create_dirkey(module, s3, bucket, dirobj, encrypt) + else: + # only use valid bucket acls for the create_bucket function + module.params['permission'] = bucket_acl + created = create_bucket(module, s3, bucket, location) + # only use valid object acls for the create_dirkey function + module.params['permission'] = object_acl + create_dirkey(module, s3, bucket, dirobj, encrypt) + + # Support for grabbing the time-expired URL for an object in S3/Walrus. 
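    # A presigned GET URL carries temporary credentials in its query string and
    # stops working once the requested expiry (600 seconds by default) elapses.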
+    if mode == 'geturl':
+        if not bucket and not obj:
+            module.fail_json(msg="Bucket and Object parameters must be set")
+
+        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+        if keyrtn:
+            get_download_url(module, s3, bucket, obj, expiry)
+        else:
+            module.fail_json(msg="Key %s does not exist." % obj)
+
+    if mode == 'getstr':
+        if bucket and obj:
+            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+            if keyrtn:
+                try:
+                    download_s3str(module, s3, bucket, obj, version=version)
+                except Sigv4Required:
+                    s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
+                    download_s3str(module, s3, bucket, obj, version=version)
+            elif version is not None:
+                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+            else:
+                module.fail_json(msg="Key %s does not exist." % obj)
+
+    module.exit_json(failed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
new file mode 100644
index 00000000..030bfc45
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
@@ -0,0 +1,808 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation
+version_added: 1.0.0
+short_description: Create or delete an AWS CloudFormation stack
+description:
+  - Launches or updates an AWS CloudFormation stack and waits for it to complete.
+notes:
+  - CloudFormation features change often, and this module tries to keep up. That means your botocore version should be fresh.
+    The version listed in the requirements is the oldest version that works with the module as a whole.
+    Some features may require recent versions, and we do not pinpoint a minimum version for each feature.
+    Instead of relying on the minimum version, keep botocore up to date. AWS is always releasing features and fixing bugs.
+options:
+  stack_name:
+    description:
+      - Name of the CloudFormation stack.
+    required: true
+    type: str
+  disable_rollback:
+    description:
+      - If a stack fails to form, rollback will remove the stack.
+    default: false
+    type: bool
+  on_create_failure:
+    description:
+      - Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option.
+    choices:
+      - DO_NOTHING
+      - ROLLBACK
+      - DELETE
+    type: str
+  create_timeout:
+    description:
+      - The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED.
+    type: int
+  template_parameters:
+    description:
+      - A dict of all the template variables for the stack. The value can be a string or a dict.
+      - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
+    default: {}
+    type: dict
+  state:
+    description:
+      - If I(state=present), stack will be created.
+      - If I(state=present) and if stack exists and template has changed, it will be updated.
+      - If I(state=absent), stack will be removed.
+    default: present
+    choices: [ present, absent ]
+    type: str
+  template:
+    description:
+      - The local path of the CloudFormation template.
+      - This must be the full path to the file, relative to the working directory. If using roles this may look
+        like C(roles/cloudformation/files/cloudformation-example.json).
+      - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+        must be specified (but only one of them).
+      - If I(state=present), the stack does exist, and neither I(template),
+        I(template_body) nor I(template_url) are specified, the previous template will be reused.
+    type: path
+  notification_arns:
+    description:
+      - A comma separated list of Simple Notification Service (SNS) topic ARNs to publish stack related events.
+    type: str
+  stack_policy:
+    description:
+      - The path of the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified.
+        For instance, to allow all updates, see U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051).
+    type: str
+  tags:
+    description:
+      - Dictionary of tags to associate with stack and its resources during stack creation.
+      - Tags can be updated later; updating tags replaces the previous entries.
+    type: dict
+  template_url:
+    description:
+      - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an
+        S3 bucket in the same region as the stack.
+      - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+        must be specified (but only one of them).
+      - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified,
+        the previous template will be reused.
+    type: str
+  create_changeset:
+    description:
+      - "If the stack already exists, create a changeset instead of directly applying changes. See the AWS Change Sets docs
+        U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)."
+      - "WARNING: if the stack does not exist, it will be created without a changeset. If I(state=absent), the stack will be
+        deleted immediately with no changeset."
+    type: bool
+    default: false
+  changeset_name:
+    description:
+      - Name given to the changeset when creating a changeset.
+      - Only used when I(create_changeset=true).
+      - By default a name prefixed with Ansible-STACKNAME is generated based on input parameters.
+        See the AWS Change Sets docs for more information
+        U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
+    type: str
+  template_format:
+    description:
+      - This parameter is ignored since Ansible 2.3 and will be removed after 2022-06-01.
+      - Templates are now passed raw to CloudFormation regardless of format.
+    type: str
+  role_arn:
+    description:
+      - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
+        docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
+    type: str
+  termination_protection:
+    description:
+      - Enable or disable termination protection on the stack. Only works with botocore >= 1.7.18.
+    type: bool
+  template_body:
+    description:
+      - Template body. Use this to pass in the actual body of the CloudFormation template.
+      - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+        must be specified (but only one of them).
+      - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+        are specified, the previous template will be reused.
+    type: str
+  events_limit:
+    description:
+      - Maximum number of CloudFormation events to fetch from a stack when creating or updating it.
+    default: 200
+    type: int
+  backoff_delay:
+    description:
+      - Number of seconds to wait for the next retry.
+    default: 3
+    type: int
+    required: False
+  backoff_max_delay:
+    description:
+      - Maximum amount of time to wait between retries.
+    default: 30
+    type: int
+    required: False
+  backoff_retries:
+    description:
+      - Number of times to retry operation.
+      - The AWS API throttling mechanism can make the CloudFormation module fail, so we retry a couple of times.
+    default: 10
+    type: int
+    required: False
+  capabilities:
+    description:
+      - Specify the capabilities that the stack template contains.
+      - Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND).
+    type: list
+    elements: str
+    default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ]
+
+author: "James S. Martin (@jsmartin)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ boto3, botocore>=1.5.45 ]
+'''
+
+EXAMPLES = '''
+- name: create a cloudformation stack
+  amazon.aws.cloudformation:
+    stack_name: "ansible-cloudformation"
+    state: "present"
+    region: "us-east-1"
+    disable_rollback: true
+    template: "files/cloudformation-example.json"
+    template_parameters:
+      KeyName: "jmartin"
+      DiskType: "ephemeral"
+      InstanceType: "m1.small"
+      ClusterSize: 3
+    tags:
+      Stack: "ansible-cloudformation"
+
+# Basic role example
+- name: create a stack, specify role that cloudformation assumes
+  amazon.aws.cloudformation:
+    stack_name: "ansible-cloudformation"
+    state: "present"
+    region: "us-east-1"
+    disable_rollback: true
+    template: "roles/cloudformation/files/cloudformation-example.json"
+    role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
+
+- name: delete a stack
+  amazon.aws.cloudformation:
+    stack_name: "ansible-cloudformation-old"
+    state: "absent"
+
+# Create a stack, pass in template from a URL, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template via a URL
+  amazon.aws.cloudformation:
+    stack_name: "ansible-cloudformation"
+    state: present
+    region: us-east-1
+    disable_rollback: true
+    template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+    template_parameters:
+      KeyName: jmartin
+      DiskType: ephemeral
+      InstanceType: m1.small
+      ClusterSize: 3
+    tags:
+      Stack: ansible-cloudformation
+
+# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template body via lookup template
+  amazon.aws.cloudformation:
+    stack_name: "ansible-cloudformation"
+    state: present
+    region: us-east-1
+    disable_rollback: true
+    template_body: "{{ lookup('template', 'cloudformation.j2') }}"
+    template_parameters:
+      KeyName: jmartin
+      DiskType: ephemeral
+      InstanceType: m1.small
+      ClusterSize: 3
+    tags:
+      Stack: ansible-cloudformation
+
+# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute
+# When use_previous_value is set to True, the given value will be ignored and
+# CloudFormation will use the value from a previously submitted template.
+# If use_previous_value is set to False (default) the given value is used.
+- amazon.aws.cloudformation:
+    stack_name: "ansible-cloudformation"
+    state: "present"
+    region: "us-east-1"
+    template: "files/cloudformation-example.json"
+    template_parameters:
+      DBSnapshotIdentifier:
+        use_previous_value: True
+        value: arn:aws:rds:us-east-1:000000000000:snapshot:rds:my-db-snapshot
+      DBName:
+        use_previous_value: True
+    tags:
+      Stack: "ansible-cloudformation"
+
+# Enable termination protection on a stack.
+# If the stack already exists, this will update its termination protection
+- name: enable termination protection during stack creation
+  amazon.aws.cloudformation:
+    stack_name: my_stack
+    state: present
+    template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+    termination_protection: yes
+
+# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED
+# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back.
+- name: set a timeout before stack creation fails
+  amazon.aws.cloudformation:
+    stack_name: my_stack
+    state: present
+    template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+    create_timeout: 5
+
+# Configure rollback behaviour on the unsuccessful creation of a stack allowing
+# CloudFormation to clean up, or do nothing in the event of an unsuccessful
+# deployment
+# In this case, if on_create_failure is set to "DELETE", it will clean up the stack if
+# it fails to create
+- name: create stack which will delete on creation failure
+  amazon.aws.cloudformation:
+    stack_name: my_stack
+    state: present
+    template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+    on_create_failure: DELETE
+'''
+
+RETURN = '''
+events:
+  type: list
+  description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
+  returned: always
+  sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
+log:
+  description: Debugging logs. Useful when modifying or finding an error.
+  returned: always
+  type: list
+  sample: ["updating stack"]
+change_set_id:
+  description: The ID of the stack change set if one was created.
+  returned: I(state=present) and I(create_changeset=true)
+  type: str
+  sample: "arn:aws:cloudformation:us-east-1:012345678901:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0"
+stack_resources:
+  description: AWS stack resources and their status. List of dictionaries, one dict per resource.
+  returned: state == present
+  type: list
+  sample: [
+    {
+      "last_updated_time": "2016-10-11T19:40:14.979000+00:00",
+      "logical_resource_id": "CFTestSg",
+      "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
+      "resource_type": "AWS::EC2::SecurityGroup",
+      "status": "UPDATE_COMPLETE",
+      "status_reason": null
+    }
+  ]
+stack_outputs:
+  type: dict
+  description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
+ returned: state == present + sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"} +''' # NOQA + +import json +import time +import traceback +import uuid +from hashlib import sha1 + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_bytes +from ansible.module_utils._text import to_native + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ..module_utils.ec2 import boto_exception + + +def get_stack_events(cfn, stack_name, events_limit, token_filter=None): + '''This event data was never correct, it worked as a side effect. So the v2.3 format is different.''' + ret = {'events': [], 'log': []} + + try: + pg = cfn.get_paginator( + 'describe_stack_events' + ).paginate( + StackName=stack_name, + PaginationConfig={'MaxItems': events_limit} + ) + if token_filter is not None: + events = list(pg.search( + "StackEvents[?ClientRequestToken == '{0}']".format(token_filter) + )) + else: + events = list(pg.search("StackEvents[*]")) + except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: + error_msg = boto_exception(err) + if 'does not exist' in error_msg: + # missing stack, don't bail. + ret['log'].append('Stack does not exist.') + return ret + ret['log'].append('Unknown error: ' + str(error_msg)) + return ret + + for e in events: + eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e) + ret['events'].append(eventline) + + if e['ResourceStatus'].endswith('FAILED'): + failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e) + ret['log'].append(failline) + + return ret + + +def create_stack(module, stack_params, cfn, events_limit): + if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: + module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.") + + # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and + # 'OnFailure' only apply on creation, not update. 
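+    # 'on_create_failure' and 'disable_rollback' are mutually exclusive (see the
+    # argument spec in main()): when OnFailure is sent, CloudFormation itself
+    # decides whether to roll back, delete or keep the failed stack.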
+    if module.params.get('on_create_failure') is not None:
+        stack_params['OnFailure'] = module.params['on_create_failure']
+    else:
+        stack_params['DisableRollback'] = module.params['disable_rollback']
+
+    if module.params.get('create_timeout') is not None:
+        stack_params['TimeoutInMinutes'] = module.params['create_timeout']
+    if module.params.get('termination_protection') is not None:
+        if boto_supports_termination_protection(cfn):
+            stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
+        else:
+            module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
+
+    try:
+        response = cfn.create_stack(**stack_params)
+        # Use stack ID to follow stack state in case of on_create_failure = DELETE
+        result = stack_operation(cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None))
+    except Exception as err:
+        module.fail_json_aws(err, msg="Failed to create stack {0}".format(stack_params.get('StackName')))
+    if not result:
+        module.fail_json(msg="empty result")
+    return result
+
+
+def list_changesets(cfn, stack_name):
+    res = cfn.list_change_sets(StackName=stack_name)
+    return [cs['ChangeSetName'] for cs in res['Summaries']]
+
+
+def create_changeset(module, stack_params, cfn, events_limit):
+    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+        module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required.")
+    if module.params['changeset_name'] is not None:
+        stack_params['ChangeSetName'] = module.params['changeset_name']
+
+    # changesets don't accept ClientRequestToken parameters
+    stack_params.pop('ClientRequestToken', None)
+
+    try:
+        changeset_name = build_changeset_name(stack_params)
+        stack_params['ChangeSetName'] = changeset_name
+
+        # Determine if this changeset already exists
+        pending_changesets = list_changesets(cfn, stack_params['StackName'])
+        if changeset_name in pending_changesets:
+            warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
+            result = dict(changed=False, output='ChangeSet %s already exists.'
+                          % changeset_name, warnings=[warning])
+        else:
+            cs = cfn.create_change_set(**stack_params)
+            # Make sure we don't enter an infinite loop
+            time_end = time.time() + 600
+            while time.time() < time_end:
+                try:
+                    newcs = cfn.describe_change_set(ChangeSetName=cs['Id'])
+                except botocore.exceptions.BotoCoreError as err:
+                    module.fail_json_aws(err)
+                if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
+                    time.sleep(1)
+                elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']:
+                    cfn.delete_change_set(ChangeSetName=cs['Id'])
+                    result = dict(changed=False,
+                                  output='The created Change Set did not contain any changes to this stack and was deleted.')
+                    # a failed change set does not trigger any stack events so we just want to
+                    # skip any further processing of result and just return it directly
+                    return result
+                else:
+                    break
+                # Let's not hog the CPU or spam the AWS API
+                time.sleep(1)
+            result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit)
+            result['change_set_id'] = cs['Id']
+            result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
+                                  'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
+                                  'NOTE that dependencies on this stack might fail due to pending changes!']
+    except Exception as err:
+        error_msg = boto_exception(err)
+        if 'No updates are to be performed.' in error_msg:
+            result = dict(changed=False, output='Stack is already up-to-date.')
+        else:
+            module.fail_json_aws(err, msg='Failed to create change set')
+
+    if not result:
+        module.fail_json(msg="empty result")
+    return result
+
+
+def update_stack(module, stack_params, cfn, events_limit):
+    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+        stack_params['UsePreviousTemplate'] = True
+
+    # if the state is present and the stack already exists, we try to update it.
+    # AWS will tell us if the stack template and parameters are the same and
+    # don't need to be updated.
+    try:
+        cfn.update_stack(**stack_params)
+        result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None))
+    except Exception as err:
+        error_msg = boto_exception(err)
+        if 'No updates are to be performed.' in error_msg:
+            result = dict(changed=False, output='Stack is already up-to-date.')
+        else:
+            module.fail_json_aws(err, msg="Failed to update stack {0}".format(stack_params.get('StackName')))
+    if not result:
+        module.fail_json(msg="empty result")
+    return result
+
+
+def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state):
+    '''updates termination protection of a stack'''
+    if not boto_supports_termination_protection(cfn):
+        module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
+    stack = get_stack_facts(cfn, stack_name)
+    if stack:
+        if stack['EnableTerminationProtection'] is not desired_termination_protection_state:
+            try:
+                cfn.update_termination_protection(
+                    EnableTerminationProtection=desired_termination_protection_state,
+                    StackName=stack_name)
+            except botocore.exceptions.ClientError as e:
+                module.fail_json_aws(e)
+
+
+def boto_supports_termination_protection(cfn):
+    '''termination protection was added in botocore 1.7.18'''
+    return hasattr(cfn, "update_termination_protection")
+
+
+def stack_operation(cfn, stack_name, operation, events_limit, op_token=None):
+    '''gets the status of a stack while it is created/updated/deleted'''
+    existed = []
+    while True:
+        try:
+            stack = get_stack_facts(cfn, stack_name)
+            existed.append('yes')
+        except Exception:
+            # If the stack previously existed, and now can't be found then it's
+            # been deleted successfully.
+            if 'yes' in existed or operation == 'DELETE':  # stacks may delete fast, look in a few ways.
+                ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+                ret.update({'changed': True, 'output': 'Stack Deleted'})
+                return ret
+            else:
+                return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
+        ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+        if not stack:
+            if 'yes' in existed or operation == 'DELETE':  # stacks may delete fast, look in a few ways.
+                ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+                ret.update({'changed': True, 'output': 'Stack Deleted'})
+                return ret
+            else:
+                ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
+                return ret
+        # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
+        # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
+        elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
+            ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
+            return ret
+        elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE':
+            ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'})
+            return ret
+        # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases.
+        elif stack['StackStatus'].endswith('_COMPLETE'):
+            ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
+            return ret
+        elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
+            ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
+            return ret
+        # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
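+        # e.g. UPDATE_ROLLBACK_FAILED ends with '_ROLLBACK_FAILED' and is caught
+        # above; only statuses like CREATE_FAILED or UPDATE_FAILED fall through
+        # to the generic '_FAILED' check below.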
+ elif stack['StackStatus'].endswith('_FAILED'): + ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation}) + return ret + else: + # this can loop forever :/ + time.sleep(5) + return {'failed': True, 'output': 'Failed for unknown reasons.'} + + +def build_changeset_name(stack_params): + if 'ChangeSetName' in stack_params: + return stack_params['ChangeSetName'] + + json_params = json.dumps(stack_params, sort_keys=True) + + return 'Ansible-{0}-{1}'.format( + stack_params['StackName'], + sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest() + ) + + +def check_mode_changeset(module, stack_params, cfn): + """Create a change set, describe it and delete it before returning check mode outputs.""" + stack_params['ChangeSetName'] = build_changeset_name(stack_params) + # changesets don't accept ClientRequestToken parameters + stack_params.pop('ClientRequestToken', None) + + try: + change_set = cfn.create_change_set(**stack_params) + for i in range(60): # total time 5 min + description = cfn.describe_change_set(ChangeSetName=change_set['Id']) + if description['Status'] in ('CREATE_COMPLETE', 'FAILED'): + break + time.sleep(5) + else: + # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail + module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName']) + + cfn.delete_change_set(ChangeSetName=change_set['Id']) + + reason = description.get('StatusReason') + + if description['Status'] == 'FAILED' and "didn't contain changes" in description['StatusReason']: + return {'changed': False, 'msg': reason, 'meta': description['StatusReason']} + return {'changed': True, 'msg': reason, 'meta': description['Changes']} + + except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: + module.fail_json_aws(err) + + +def get_stack_facts(cfn, stack_name): + try: + stack_response = cfn.describe_stacks(StackName=stack_name) + stack_info = stack_response['Stacks'][0] + except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: + error_msg = boto_exception(err) + if 'does not exist' in error_msg: + # missing stack, don't bail. + return None + + # other error, bail. 
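+        # re-raise anything else so callers (e.g. the absent branch in main())
+        # can report it via fail_json_aws with full context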
+ raise err + + if stack_response and stack_response.get('Stacks', None): + stacks = stack_response['Stacks'] + if len(stacks): + stack_info = stacks[0] + + return stack_info + + +def main(): + argument_spec = dict( + stack_name=dict(required=True), + template_parameters=dict(required=False, type='dict', default={}), + state=dict(default='present', choices=['present', 'absent']), + template=dict(default=None, required=False, type='path'), + notification_arns=dict(default=None, required=False), + stack_policy=dict(default=None, required=False), + disable_rollback=dict(default=False, type='bool'), + on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']), + create_timeout=dict(default=None, type='int'), + template_url=dict(default=None, required=False), + template_body=dict(default=None, required=False), + template_format=dict(removed_at_date='2022-06-01', removed_from_collection='amazon.aws'), + create_changeset=dict(default=False, type='bool'), + changeset_name=dict(default=None, required=False), + role_arn=dict(default=None, required=False), + tags=dict(default=None, type='dict'), + termination_protection=dict(default=None, type='bool'), + events_limit=dict(default=200, type='int'), + backoff_retries=dict(type='int', default=10, required=False), + backoff_delay=dict(type='int', default=3, required=False), + backoff_max_delay=dict(type='int', default=30, required=False), + capabilities=dict(type='list', elements='str', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[['template_url', 'template', 'template_body'], + ['disable_rollback', 'on_create_failure']], + supports_check_mode=True + ) + + invalid_capabilities = [] + user_capabilities = module.params.get('capabilities') + for user_cap in user_capabilities: + if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']: + invalid_capabilities.append(user_cap) + + if invalid_capabilities: + module.fail_json(msg="Specified capabilities are invalid : %r," + " please check documentation for valid capabilities" % invalid_capabilities) + + # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. + stack_params = { + 'Capabilities': user_capabilities, + 'ClientRequestToken': to_native(uuid.uuid4()), + } + state = module.params['state'] + stack_params['StackName'] = module.params['stack_name'] + + if module.params['template'] is not None: + with open(module.params['template'], 'r') as template_fh: + stack_params['TemplateBody'] = template_fh.read() + elif module.params['template_body'] is not None: + stack_params['TemplateBody'] = module.params['template_body'] + elif module.params['template_url'] is not None: + stack_params['TemplateURL'] = module.params['template_url'] + + if module.params.get('notification_arns'): + stack_params['NotificationARNs'] = module.params['notification_arns'].split(',') + else: + stack_params['NotificationARNs'] = [] + + # can't check the policy when verifying. 
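+    # the StackPolicyBody is read and sent only when changes are actually being
+    # applied; check mode and changeset creation leave the policy untouched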
+ if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']: + with open(module.params['stack_policy'], 'r') as stack_policy_fh: + stack_params['StackPolicyBody'] = stack_policy_fh.read() + + template_parameters = module.params['template_parameters'] + + stack_params['Parameters'] = [] + for k, v in template_parameters.items(): + if isinstance(v, dict): + # set parameter based on a dict to allow additional CFN Parameter Attributes + param = dict(ParameterKey=k) + + if 'value' in v: + param['ParameterValue'] = str(v['value']) + + if 'use_previous_value' in v and bool(v['use_previous_value']): + param['UsePreviousValue'] = True + param.pop('ParameterValue', None) + + stack_params['Parameters'].append(param) + else: + # allow default k/v configuration to set a template parameter + stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)}) + + if isinstance(module.params.get('tags'), dict): + stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags']) + + if module.params.get('role_arn'): + stack_params['RoleARN'] = module.params['role_arn'] + + result = {} + + cfn = module.client('cloudformation') + + # Wrap the cloudformation client methods that this module uses with + # automatic backoff / retry for throttling error codes + backoff_wrapper = AWSRetry.jittered_backoff( + retries=module.params.get('backoff_retries'), + delay=module.params.get('backoff_delay'), + max_delay=module.params.get('backoff_max_delay') + ) + cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events) + cfn.create_stack = backoff_wrapper(cfn.create_stack) + cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets) + cfn.create_change_set = backoff_wrapper(cfn.create_change_set) + cfn.update_stack = backoff_wrapper(cfn.update_stack) + cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks) + cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources) + cfn.delete_stack = backoff_wrapper(cfn.delete_stack) + if boto_supports_termination_protection(cfn): + cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection) + + stack_info = get_stack_facts(cfn, stack_params['StackName']) + + if module.check_mode: + if state == 'absent' and stack_info: + module.exit_json(changed=True, msg='Stack would be deleted', meta=[]) + elif state == 'absent' and not stack_info: + module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[]) + elif state == 'present' and not stack_info: + module.exit_json(changed=True, msg='New stack would be created', meta=[]) + else: + module.exit_json(**check_mode_changeset(module, stack_params, cfn)) + + if state == 'present': + if not stack_info: + result = create_stack(module, stack_params, cfn, module.params.get('events_limit')) + elif module.params.get('create_changeset'): + result = create_changeset(module, stack_params, cfn, module.params.get('events_limit')) + else: + if module.params.get('termination_protection') is not None: + update_termination_protection(module, cfn, stack_params['StackName'], + bool(module.params.get('termination_protection'))) + result = update_stack(module, stack_params, cfn, module.params.get('events_limit')) + + # format the stack output + + stack = get_stack_facts(cfn, stack_params['StackName']) + if stack is not None: + if result.get('stack_outputs') is None: + # always define stack_outputs, but it may be empty + result['stack_outputs'] = {} + for output in stack.get('Outputs', []): + 
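# Outputs is a list of {'OutputKey': ..., 'OutputValue': ...} dicts;
+                # flatten it into a plain key/value mapping.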
+                result['stack_outputs'][output['OutputKey']] = output['OutputValue']
+            stack_resources = []
+            reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
+            for res in reslist.get('StackResourceSummaries', []):
+                stack_resources.append({
+                    "logical_resource_id": res['LogicalResourceId'],
+                    "physical_resource_id": res.get('PhysicalResourceId', ''),
+                    "resource_type": res['ResourceType'],
+                    "last_updated_time": res['LastUpdatedTimestamp'],
+                    "status": res['ResourceStatus'],
+                    "status_reason": res.get('ResourceStatusReason')  # can be blank, apparently
+                })
+            result['stack_resources'] = stack_resources
+
+    elif state == 'absent':
+        # absent state is different because of the way delete_stack works.
+        # the problem is it doesn't give an error if the stack isn't found,
+        # so we must describe the stack first
+
+        try:
+            stack = get_stack_facts(cfn, stack_params['StackName'])
+            if not stack:
+                result = {'changed': False, 'output': 'Stack not found.'}
+            else:
+                if stack_params.get('RoleARN') is None:
+                    cfn.delete_stack(StackName=stack_params['StackName'])
+                else:
+                    cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
+                result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
+                                         stack_params.get('ClientRequestToken', None))
+        except Exception as err:
+            module.fail_json_aws(err)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_facts.py
new file mode 100644
index 00000000..0c34e8b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_facts.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation_info
+version_added: 1.0.0
+short_description: Obtain information about an AWS CloudFormation stack
+description:
+  - Gets information about an AWS CloudFormation stack.
+  - This module was called C(amazon.aws.cloudformation_facts) before Ansible 2.9, returning C(ansible_facts).
+    Note that the M(amazon.aws.cloudformation_info) module no longer returns C(ansible_facts)!
+requirements:
+  - boto3 >= 1.0.0
+  - python >= 2.6
+author:
+  - Justin Menga (@jmenga)
+  - Kevin Coming (@waffie1)
+options:
+  stack_name:
+    description:
+      - The name or id of the CloudFormation stack. Gathers information on all stacks by default.
+    type: str
+  all_facts:
+    description:
+      - Get all stack information for the stack.
+    type: bool
+    default: false
+  stack_events:
+    description:
+      - Get stack events for the stack.
+    type: bool
+    default: false
+  stack_template:
+    description:
+      - Get stack template body for the stack.
+    type: bool
+    default: false
+  stack_resources:
+    description:
+      - Get stack resources for the stack.
+    type: bool
+    default: false
+  stack_policy:
+    description:
+      - Get stack policy for the stack.
+    type: bool
+    default: false
+  stack_change_sets:
+    description:
+      - Get stack change sets for the stack
+    type: bool
+    default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
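+
+# A minimal illustrative sketch: with stack_name omitted the module gathers
+# information on all stacks in the region, keyed by stack name.
+- name: Get summary information about every stack
+  amazon.aws.cloudformation_info:
+  register: all_stacks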
+ +- name: Get summary information about a stack + amazon.aws.cloudformation_info: + stack_name: my-cloudformation-stack + register: output + +- debug: + msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}" + +# When the module is called as cloudformation_facts, return values are published +# in ansible_facts['cloudformation'][<stack_name>] and can be used as follows. +# Note that this is deprecated and will stop working in Ansible after 2021-12-01. + +- amazon.aws.cloudformation_facts: + stack_name: my-cloudformation-stack + +- debug: + msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}" + +# Get stack outputs, when you have the stack name available as a fact +- set_fact: + stack_name: my-awesome-stack + +- amazon.aws.cloudformation_info: + stack_name: "{{ stack_name }}" + register: my_stack + +- debug: + msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}" + +# Get all stack information about a stack +- amazon.aws.cloudformation_info: + stack_name: my-cloudformation-stack + all_facts: true + +# Get stack resource and stack policy information about a stack +- amazon.aws.cloudformation_info: + stack_name: my-cloudformation-stack + stack_resources: true + stack_policy: true + +# Fail if the stack doesn't exist +- name: try to get facts about a stack but fail if it doesn't exist + amazon.aws.cloudformation_info: + stack_name: nonexistent-stack + all_facts: yes + failed_when: cloudformation['nonexistent-stack'] is undefined +''' + +RETURN = ''' +stack_description: + description: Summary facts about the stack + returned: if the stack exists + type: dict +stack_outputs: + description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each + output 'OutputValue' parameter + returned: if the stack exists + type: dict + sample: + ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com +stack_parameters: + description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of + each parameter 'ParameterValue' parameter + returned: if the stack exists + type: dict + sample: + DatabaseEngine: mysql + DatabasePassword: "***" +stack_events: + description: All stack events for the stack + returned: only if all_facts or stack_events is true and the stack exists + type: list +stack_policy: + description: Describes the stack policy for the stack + returned: only if all_facts or stack_policy is true and the stack exists + type: dict +stack_template: + description: Describes the stack template for the stack + returned: only if all_facts or stack_template is true and the stack exists + type: dict +stack_resource_list: + description: Describes stack resources for the stack + returned: only if all_facts or stack_resources is true and the stack exists + type: list +stack_resources: + description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each + resource 'PhysicalResourceId' parameter + returned: only if all_facts or stack_resources is true and the stack exists + type: dict + sample: + AutoScalingGroup: "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7" + AutoScalingSecurityGroup: "sg-abcd1234" + ApplicationDatabase: "dazvlpr01xj55a" +stack_change_sets: + description: A list of stack change sets. 
Each item in the list represents the details of a specific changeset + + returned: only if all_facts or stack_change_sets is true and the stack exists + type: list +''' + +import json + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.core import is_boto3_error_message +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +class CloudFormationServiceManager: + """Handles CloudFormation Services""" + + def __init__(self, module): + self.module = module + self.client = module.client('cloudformation') + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stacks_with_backoff(self, **kwargs): + paginator = self.client.get_paginator('describe_stacks') + return paginator.paginate(**kwargs).build_full_result()['Stacks'] + + def describe_stacks(self, stack_name=None): + try: + kwargs = {'StackName': stack_name} if stack_name else {} + response = self.describe_stacks_with_backoff(**kwargs) + if response is not None: + return response + self.module.fail_json(msg="Error describing stack(s) - an empty response was returned") + except is_boto3_error_message('does not exist'): + return {} + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Error describing stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def list_stack_resources_with_backoff(self, stack_name): + paginator = self.client.get_paginator('list_stack_resources') + return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries'] + + def list_stack_resources(self, stack_name): + try: + return self.list_stack_resources_with_backoff(stack_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stack_events_with_backoff(self, stack_name): + paginator = self.client.get_paginator('describe_stack_events') + return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents'] + + def describe_stack_events(self, stack_name): + try: + return self.describe_stack_events_with_backoff(stack_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def list_stack_change_sets_with_backoff(self, stack_name): + paginator = self.client.get_paginator('list_change_sets') + return paginator.paginate(StackName=stack_name).build_full_result()['Summaries'] + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stack_change_set_with_backoff(self, **kwargs): + paginator = self.client.get_paginator('describe_change_set') + return paginator.paginate(**kwargs).build_full_result() + + def describe_stack_change_sets(self, stack_name): + changes = [] + try: + change_sets = self.list_stack_change_sets_with_backoff(stack_name) + for item in change_sets: + changes.append(self.describe_stack_change_set_with_backoff( + StackName=stack_name, + ChangeSetName=item['ChangeSetName'])) + return changes + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) 
as e: + self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def get_stack_policy_with_backoff(self, stack_name): + return self.client.get_stack_policy(StackName=stack_name) + + def get_stack_policy(self, stack_name): + try: + response = self.get_stack_policy_with_backoff(stack_name) + stack_policy = response.get('StackPolicyBody') + if stack_policy: + return json.loads(stack_policy) + return dict() + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def get_template_with_backoff(self, stack_name): + return self.client.get_template(StackName=stack_name) + + def get_template(self, stack_name): + try: + response = self.get_template_with_backoff(stack_name) + return response.get('TemplateBody') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name) + + +def to_dict(items, key, value): + ''' Transforms a list of items to a Key/Value dictionary ''' + if items: + return dict(zip([i.get(key) for i in items], [i.get(value) for i in items])) + else: + return dict() + + +def main(): + argument_spec = dict( + stack_name=dict(), + all_facts=dict(required=False, default=False, type='bool'), + stack_policy=dict(required=False, default=False, type='bool'), + stack_events=dict(required=False, default=False, type='bool'), + stack_resources=dict(required=False, default=False, type='bool'), + stack_template=dict(required=False, default=False, type='bool'), + stack_change_sets=dict(required=False, default=False, type='bool'), + ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + is_old_facts = module._name == 'cloudformation_facts' + if is_old_facts: + module.deprecate("The 'cloudformation_facts' module has been renamed to 'cloudformation_info', " + "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='amazon.aws') + + service_mgr = CloudFormationServiceManager(module) + + if is_old_facts: + result = {'ansible_facts': {'cloudformation': {}}} + else: + result = {'cloudformation': {}} + + for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')): + facts = {'stack_description': stack_description} + stack_name = stack_description.get('StackName') + + # Create stack output and stack parameter dictionaries + if facts['stack_description']: + facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue') + facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), + 'ParameterKey', 'ParameterValue') + facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags')) + + # Create optional stack outputs + all_facts = module.params.get('all_facts') + if all_facts or module.params.get('stack_resources'): + facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name) + facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), + 'LogicalResourceId', 'PhysicalResourceId') + if all_facts or module.params.get('stack_template'): + facts['stack_template'] = service_mgr.get_template(stack_name) + if all_facts or module.params.get('stack_policy'): + facts['stack_policy'] = service_mgr.get_stack_policy(stack_name) + if 
all_facts or module.params.get('stack_events'): + facts['stack_events'] = service_mgr.describe_stack_events(stack_name) + if all_facts or module.params.get('stack_change_sets'): + facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name) + + if is_old_facts: + result['ansible_facts']['cloudformation'][stack_name] = facts + else: + result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs', + 'stack_parameters', + 'stack_policy', + 'stack_resources', + 'stack_tags', + 'stack_template')) + + module.exit_json(changed=False, **result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py new file mode 100644 index 00000000..0c34e8b1 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py @@ -0,0 +1,349 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: cloudformation_info +version_added: 1.0.0 +short_description: Obtain information about an AWS CloudFormation stack +description: + - Gets information about an AWS CloudFormation stack. + - This module was called C(amazon.aws.cloudformation_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(amazon.aws.cloudformation_info) module no longer returns C(ansible_facts)! +requirements: + - boto3 >= 1.0.0 + - python >= 2.6 +author: + - Justin Menga (@jmenga) + - Kevin Coming (@waffie1) +options: + stack_name: + description: + - The name or id of the CloudFormation stack. Gathers information on all stacks by default. + type: str + all_facts: + description: + - Get all stack information for the stack. + type: bool + default: false + stack_events: + description: + - Get stack events for the stack. + type: bool + default: false + stack_template: + description: + - Get stack template body for the stack. + type: bool + default: false + stack_resources: + description: + - Get stack resources for the stack. + type: bool + default: false + stack_policy: + description: + - Get stack policy for the stack. + type: bool + default: false + stack_change_sets: + description: + - Get stack change sets for the stack + type: bool + default: false +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Get summary information about a stack + amazon.aws.cloudformation_info: + stack_name: my-cloudformation-stack + register: output + +- debug: + msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}" + +# When the module is called as cloudformation_facts, return values are published +# in ansible_facts['cloudformation'][<stack_name>] and can be used as follows. +# Note that this is deprecated and will stop working in Ansible after 2021-12-01. 
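+
+# An illustrative sketch (the stack name is hypothetical): fetch only the
+# change sets for a single stack.
+- amazon.aws.cloudformation_info:
+    stack_name: my-cloudformation-stack
+    stack_change_sets: true
+  register: stack_info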
+ +- amazon.aws.cloudformation_facts: + stack_name: my-cloudformation-stack + +- debug: + msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}" + +# Get stack outputs, when you have the stack name available as a fact +- set_fact: + stack_name: my-awesome-stack + +- amazon.aws.cloudformation_info: + stack_name: "{{ stack_name }}" + register: my_stack + +- debug: + msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}" + +# Get all stack information about a stack +- amazon.aws.cloudformation_info: + stack_name: my-cloudformation-stack + all_facts: true + +# Get stack resource and stack policy information about a stack +- amazon.aws.cloudformation_info: + stack_name: my-cloudformation-stack + stack_resources: true + stack_policy: true + +# Fail if the stack doesn't exist +- name: try to get facts about a stack but fail if it doesn't exist + amazon.aws.cloudformation_info: + stack_name: nonexistent-stack + all_facts: yes + failed_when: cloudformation['nonexistent-stack'] is undefined +''' + +RETURN = ''' +stack_description: + description: Summary facts about the stack + returned: if the stack exists + type: dict +stack_outputs: + description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each + output 'OutputValue' parameter + returned: if the stack exists + type: dict + sample: + ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com +stack_parameters: + description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of + each parameter 'ParameterValue' parameter + returned: if the stack exists + type: dict + sample: + DatabaseEngine: mysql + DatabasePassword: "***" +stack_events: + description: All stack events for the stack + returned: only if all_facts or stack_events is true and the stack exists + type: list +stack_policy: + description: Describes the stack policy for the stack + returned: only if all_facts or stack_policy is true and the stack exists + type: dict +stack_template: + description: Describes the stack template for the stack + returned: only if all_facts or stack_template is true and the stack exists + type: dict +stack_resource_list: + description: Describes stack resources for the stack + returned: only if all_facts or stack_resources is true and the stack exists + type: list +stack_resources: + description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each + resource 'PhysicalResourceId' parameter + returned: only if all_facts or stack_resources is true and the stack exists + type: dict + sample: + AutoScalingGroup: "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7" + AutoScalingSecurityGroup: "sg-abcd1234" + ApplicationDatabase: "dazvlpr01xj55a" +stack_change_sets: + description: A list of stack change sets. 
Each item in the list represents the details of a specific changeset + + returned: only if all_facts or stack_change_sets is true and the stack exists + type: list +''' + +import json + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.core import is_boto3_error_message +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +class CloudFormationServiceManager: + """Handles CloudFormation Services""" + + def __init__(self, module): + self.module = module + self.client = module.client('cloudformation') + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stacks_with_backoff(self, **kwargs): + paginator = self.client.get_paginator('describe_stacks') + return paginator.paginate(**kwargs).build_full_result()['Stacks'] + + def describe_stacks(self, stack_name=None): + try: + kwargs = {'StackName': stack_name} if stack_name else {} + response = self.describe_stacks_with_backoff(**kwargs) + if response is not None: + return response + self.module.fail_json(msg="Error describing stack(s) - an empty response was returned") + except is_boto3_error_message('does not exist'): + return {} + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Error describing stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def list_stack_resources_with_backoff(self, stack_name): + paginator = self.client.get_paginator('list_stack_resources') + return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries'] + + def list_stack_resources(self, stack_name): + try: + return self.list_stack_resources_with_backoff(stack_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stack_events_with_backoff(self, stack_name): + paginator = self.client.get_paginator('describe_stack_events') + return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents'] + + def describe_stack_events(self, stack_name): + try: + return self.describe_stack_events_with_backoff(stack_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def list_stack_change_sets_with_backoff(self, stack_name): + paginator = self.client.get_paginator('list_change_sets') + return paginator.paginate(StackName=stack_name).build_full_result()['Summaries'] + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stack_change_set_with_backoff(self, **kwargs): + paginator = self.client.get_paginator('describe_change_set') + return paginator.paginate(**kwargs).build_full_result() + + def describe_stack_change_sets(self, stack_name): + changes = [] + try: + change_sets = self.list_stack_change_sets_with_backoff(stack_name) + for item in change_sets: + changes.append(self.describe_stack_change_set_with_backoff( + StackName=stack_name, + ChangeSetName=item['ChangeSetName'])) + return changes + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) 
as e: + self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def get_stack_policy_with_backoff(self, stack_name): + return self.client.get_stack_policy(StackName=stack_name) + + def get_stack_policy(self, stack_name): + try: + response = self.get_stack_policy_with_backoff(stack_name) + stack_policy = response.get('StackPolicyBody') + if stack_policy: + return json.loads(stack_policy) + return dict() + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def get_template_with_backoff(self, stack_name): + return self.client.get_template(StackName=stack_name) + + def get_template(self, stack_name): + try: + response = self.get_template_with_backoff(stack_name) + return response.get('TemplateBody') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name) + + +def to_dict(items, key, value): + ''' Transforms a list of items to a Key/Value dictionary ''' + if items: + return dict(zip([i.get(key) for i in items], [i.get(value) for i in items])) + else: + return dict() + + +def main(): + argument_spec = dict( + stack_name=dict(), + all_facts=dict(required=False, default=False, type='bool'), + stack_policy=dict(required=False, default=False, type='bool'), + stack_events=dict(required=False, default=False, type='bool'), + stack_resources=dict(required=False, default=False, type='bool'), + stack_template=dict(required=False, default=False, type='bool'), + stack_change_sets=dict(required=False, default=False, type='bool'), + ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + is_old_facts = module._name == 'cloudformation_facts' + if is_old_facts: + module.deprecate("The 'cloudformation_facts' module has been renamed to 'cloudformation_info', " + "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='amazon.aws') + + service_mgr = CloudFormationServiceManager(module) + + if is_old_facts: + result = {'ansible_facts': {'cloudformation': {}}} + else: + result = {'cloudformation': {}} + + for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')): + facts = {'stack_description': stack_description} + stack_name = stack_description.get('StackName') + + # Create stack output and stack parameter dictionaries + if facts['stack_description']: + facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue') + facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), + 'ParameterKey', 'ParameterValue') + facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags')) + + # Create optional stack outputs + all_facts = module.params.get('all_facts') + if all_facts or module.params.get('stack_resources'): + facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name) + facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), + 'LogicalResourceId', 'PhysicalResourceId') + if all_facts or module.params.get('stack_template'): + facts['stack_template'] = service_mgr.get_template(stack_name) + if all_facts or module.params.get('stack_policy'): + facts['stack_policy'] = service_mgr.get_stack_policy(stack_name) + if 
all_facts or module.params.get('stack_events'): + facts['stack_events'] = service_mgr.describe_stack_events(stack_name) + if all_facts or module.params.get('stack_change_sets'): + facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name) + + if is_old_facts: + result['ansible_facts']['cloudformation'][stack_name] = facts + else: + result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs', + 'stack_parameters', + 'stack_policy', + 'stack_resources', + 'stack_tags', + 'stack_template')) + + module.exit_json(changed=False, **result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2.py new file mode 100644 index 00000000..990a7e69 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2.py @@ -0,0 +1,1740 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2 +version_added: 1.0.0 +short_description: create, terminate, start or stop an instance in ec2 +description: + - Creates or terminates ec2 instances. + - > + Note: This module uses the older boto Python module to interact with the EC2 API. + M(amazon.aws.ec2) will still receive bug fixes, but no new features. + Consider using the M(amazon.aws.ec2_instance) module instead. + If M(amazon.aws.ec2_instance) does not support a feature you need that is available in M(amazon.aws.ec2), please + file a feature request. +options: + key_name: + description: + - Key pair to use on the instance. + - The SSH key must already exist in AWS in order to use this argument. + - Keys can be created / deleted using the M(amazon.aws.ec2_key) module. + aliases: ['keypair'] + type: str + id: + description: + - Identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. + - This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. + - For details, see the description of client token at U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + type: str + group: + description: + - Security group (or list of groups) to use with the instance. + aliases: [ 'groups' ] + type: list + elements: str + group_id: + description: + - Security group id (or list of ids) to use with the instance. + type: list + elements: str + zone: + description: + - AWS availability zone in which to launch the instance. + aliases: [ 'aws_zone', 'ec2_zone' ] + type: str + instance_type: + description: + - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + - Required when creating a new instance. + type: str + aliases: ['type'] + tenancy: + description: + - An instance with a tenancy of C(dedicated) runs on single-tenant hardware and can only be launched into a VPC. + - Note that to use dedicated tenancy you MUST specify a I(vpc_subnet_id) as well. + - Dedicated tenancy is not available for EC2 "micro" instances. + default: default + choices: [ "default", "dedicated" ] + type: str + spot_price: + description: + - Maximum spot price to bid. 
If not set, a regular on-demand instance is requested. + - A spot request is made with this maximum bid. When it is filled, the instance is started. + type: str + spot_type: + description: + - The type of spot request. + - After being interrupted a C(persistent) spot instance will be started once there is capacity to fill the request again. + default: "one-time" + choices: [ "one-time", "persistent" ] + type: str + image: + description: + - I(ami) ID to use for the instance. + - Required when I(state=present). + type: str + kernel: + description: + - Kernel eki to use for the instance. + type: str + ramdisk: + description: + - Ramdisk eri to use for the instance. + type: str + wait: + description: + - Wait for the instance to reach its desired state before returning. + - Does not wait for SSH, see the 'wait_for_connection' example for details. + type: bool + default: false + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + type: int + spot_wait_timeout: + description: + - How long to wait for the spot instance request to be fulfilled. Affects 'Request valid until' for setting spot request lifespan. + default: 600 + type: int + count: + description: + - Number of instances to launch. + default: 1 + type: int + monitoring: + description: + - Enable detailed monitoring (CloudWatch) for the instance. + type: bool + default: false + user_data: + description: + - Opaque blob of data which is made available to the EC2 instance. + type: str + instance_tags: + description: + - > + A hash/dictionary of tags to add to the new instance or for + instances to start/stop by tag. For example C({"key":"value"}) or + C({"key":"value","key2":"value2"}). + type: dict + placement_group: + description: + - Placement group for the instance when using EC2 Clustered Compute. + type: str + vpc_subnet_id: + description: + - The subnet ID in which to launch the instance (VPC). + type: str + assign_public_ip: + description: + - When provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+. + type: bool + private_ip: + description: + - The private ip address to assign the instance (from the vpc subnet). + type: str + instance_profile_name: + description: + - Name of the IAM instance profile (i.e. what the EC2 console refers to as an "IAM Role") to use. Boto library must be 2.5.0+. + type: str + instance_ids: + description: + - "list of instance ids, currently used for states: absent, running, stopped" + aliases: ['instance_id'] + type: list + elements: str + source_dest_check: + description: + - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers). + When initially creating an instance the EC2 API defaults this to C(True). + type: bool + termination_protection: + description: + - Enable or Disable the Termination Protection. + - Defaults to C(false). + type: bool + instance_initiated_shutdown_behavior: + description: + - Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store. + images (which require termination on shutdown). + default: 'stop' + choices: [ "stop", "terminate" ] + type: str + state: + description: + - Create, terminate, start, stop or restart instances. The state 'restarted' was added in Ansible 2.2. + - When I(state=absent), I(instance_ids) is required. + - When I(state=running), I(state=stopped) or I(state=restarted) then either I(instance_ids) or I(instance_tags) is required. 
+ default: 'present' + choices: ['absent', 'present', 'restarted', 'running', 'stopped'] + type: str + volumes: + description: + - A list of hash/dictionaries of volumes to add to the new instance. + type: list + elements: dict + suboptions: + device_name: + type: str + required: true + description: + - A name for the device (For example C(/dev/sda)). + delete_on_termination: + type: bool + default: false + description: + - Whether the volume should be automatically deleted when the instance is terminated. + ephemeral: + type: str + description: + - Whether the volume should be ephemeral. + - Data on ephemeral volumes is lost when the instance is stopped. + - Mutually exclusive with the I(snapshot) parameter. + encrypted: + type: bool + default: false + description: + - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK. + snapshot: + type: str + description: + - The ID of an EBS snapshot to copy when creating the volume. + - Mutually exclusive with the I(ephemeral) parameter. + volume_type: + type: str + description: + - The type of volume to create. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types. + volume_size: + type: int + description: + - The size of the volume (in GiB). + iops: + type: int + description: + - The number of IOPS per second to provision for the volume. + - Required when I(volume_type=io1). + ebs_optimized: + description: + - Whether instance is using optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html). + default: false + type: bool + exact_count: + description: + - An integer value which indicates how many instances that match the 'count_tag' parameter should be running. + Instances are either created or terminated based on this value. + type: int + count_tag: + description: + - Used with I(exact_count) to determine how many nodes based on a specific tag criteria should be running. + This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers + that are tagged with C(class=webserver). The specified tag must already exist or be passed in as the I(instance_tags) option. + type: raw + network_interfaces: + description: + - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces, + none of the I(assign_public_ip), I(private_ip), I(vpc_subnet_id), I(group), or I(group_id) parameters may be used. (Those parameters are + for creating a new network interface at launch.) + aliases: ['network_interface'] + type: list + elements: str + spot_launch_group: + description: + - Launch group for spot requests, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group). + type: str +author: + - "Tim Gerla (@tgerla)" + - "Lester Wade (@lwade)" + - "Seth Vidal (@skvidal)" +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
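+
+# Credentials can also be passed explicitly as module parameters (an
+# illustrative sketch; environment variables or instance profiles are the
+# more common approaches described in the AWS Guide):
+#
+# - amazon.aws.ec2:
+#     aws_access_key: '{{ my_access_key }}'
+#     aws_secret_key: '{{ my_secret_key }}'
+#     region: us-east-1
+#     image: ami-123456
+#     instance_type: t2.micro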
+ +# Basic provisioning example +- amazon.aws.ec2: + key_name: mykey + instance_type: t2.micro + image: ami-123456 + wait: yes + group: webserver + count: 3 + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Advanced example with tagging and CloudWatch +- amazon.aws.ec2: + key_name: mykey + group: databases + instance_type: t2.micro + image: ami-123456 + wait: yes + wait_timeout: 500 + count: 5 + instance_tags: + db: postgres + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Single instance with additional IOPS volume from snapshot and volume delete on termination +- amazon.aws.ec2: + key_name: mykey + group: webserver + instance_type: c3.medium + image: ami-123456 + wait: yes + wait_timeout: 500 + volumes: + - device_name: /dev/sdb + snapshot: snap-abcdef12 + volume_type: io1 + iops: 1000 + volume_size: 100 + delete_on_termination: true + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Single instance with ssd gp2 root volume +- amazon.aws.ec2: + key_name: mykey + group: webserver + instance_type: c3.medium + image: ami-123456 + wait: yes + wait_timeout: 500 + volumes: + - device_name: /dev/xvda + volume_type: gp2 + volume_size: 8 + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + count_tag: + Name: dbserver + exact_count: 1 + +# Multiple groups example +- amazon.aws.ec2: + key_name: mykey + group: ['databases', 'internal-services', 'sshable', 'and-so-forth'] + instance_type: m1.large + image: ami-6e649707 + wait: yes + wait_timeout: 500 + count: 5 + instance_tags: + db: postgres + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Multiple instances with additional volume from snapshot +- amazon.aws.ec2: + key_name: mykey + group: webserver + instance_type: m1.large + image: ami-6e649707 + wait: yes + wait_timeout: 500 + count: 5 + volumes: + - device_name: /dev/sdb + snapshot: snap-abcdef12 + volume_size: 10 + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Dedicated tenancy example +- amazon.aws.ec2: + assign_public_ip: yes + group_id: sg-1dc53f72 + key_name: mykey + image: ami-6e649707 + instance_type: m1.small + tenancy: dedicated + vpc_subnet_id: subnet-29e63245 + wait: yes + +# Spot instance example +- amazon.aws.ec2: + spot_price: 0.24 + spot_wait_timeout: 600 + keypair: mykey + group_id: sg-1dc53f72 + instance_type: m1.small + image: ami-6e649707 + wait: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + spot_launch_group: report_generators + instance_initiated_shutdown_behavior: terminate + +# Examples using pre-existing network interfaces +- amazon.aws.ec2: + key_name: mykey + instance_type: t2.small + image: ami-f005ba11 + network_interface: eni-deadbeef + +- amazon.aws.ec2: + key_name: mykey + instance_type: t2.small + image: ami-f005ba11 + network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e'] + +# Launch instances, runs some tasks +# and then terminate them + +- name: Create a sandbox instance + hosts: localhost + gather_facts: False + vars: + keypair: my_keypair + instance_type: m1.small + security_group: my_securitygroup + image: my_ami_id + region: us-east-1 + tasks: + - name: Launch instance + amazon.aws.ec2: + key_name: "{{ keypair }}" + group: "{{ security_group }}" + instance_type: "{{ instance_type }}" + image: "{{ image }}" + wait: true + region: "{{ region }}" + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + register: ec2 + + - name: Add new instance to host group + add_host: + hostname: "{{ item.public_ip }}" 
+ groupname: launched + loop: "{{ ec2.instances }}" + + - name: Wait for SSH to come up + delegate_to: "{{ item.public_dns_name }}" + wait_for_connection: + delay: 60 + timeout: 320 + loop: "{{ ec2.instances }}" + +- name: Configure instance(s) + hosts: launched + become: True + gather_facts: True + roles: + - my_awesome_role + - my_awesome_test + +- name: Terminate instances + hosts: localhost + tasks: + - name: Terminate instances that were previously launched + amazon.aws.ec2: + state: 'absent' + instance_ids: '{{ ec2.instance_ids }}' + +# Start a few existing instances, run some tasks +# and stop the instances + +- name: Start sandbox instances + hosts: localhost + gather_facts: false + vars: + instance_ids: + - 'i-xxxxxx' + - 'i-xxxxxx' + - 'i-xxxxxx' + region: us-east-1 + tasks: + - name: Start the sandbox instances + amazon.aws.ec2: + instance_ids: '{{ instance_ids }}' + region: '{{ region }}' + state: running + wait: True + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + roles: + - do_neat_stuff + - do_more_neat_stuff + +- name: Stop sandbox instances + hosts: localhost + gather_facts: false + vars: + instance_ids: + - 'i-xxxxxx' + - 'i-xxxxxx' + - 'i-xxxxxx' + region: us-east-1 + tasks: + - name: Stop the sandbox instances + amazon.aws.ec2: + instance_ids: '{{ instance_ids }}' + region: '{{ region }}' + state: stopped + wait: True + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# +# Start stopped instances specified by tag +# +- amazon.aws.ec2: + instance_tags: + Name: ExtraPower + state: running + +# +# Restart instances specified by tag +# +- amazon.aws.ec2: + instance_tags: + Name: ExtraPower + state: restarted + +# +# Enforce that 5 instances with a tag "foo" are running +# (Highly recommended!) +# + +- amazon.aws.ec2: + key_name: mykey + instance_type: c1.medium + image: ami-40603AD1 + wait: yes + group: webserver + instance_tags: + foo: bar + exact_count: 5 + count_tag: foo + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# +# Enforce that 5 running instances named "database" with a "dbtype" of "postgres" +# + +- amazon.aws.ec2: + key_name: mykey + instance_type: c1.medium + image: ami-40603AD1 + wait: yes + group: webserver + instance_tags: + Name: database + dbtype: postgres + exact_count: 5 + count_tag: + Name: database + dbtype: postgres + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# +# count_tag complex argument examples +# + + # instances with tag foo +- amazon.aws.ec2: + count_tag: + foo: + + # instances with tag foo=bar +- amazon.aws.ec2: + count_tag: + foo: bar + + # instances with tags foo=bar & baz +- amazon.aws.ec2: + count_tag: + foo: bar + baz: + + # instances with tags foo & bar & baz=bang +- amazon.aws.ec2: + count_tag: + - foo + - bar + - baz: bang + +''' + +import time +import datetime +from ast import literal_eval +from distutils.version import LooseVersion + +try: + import boto.ec2 + from boto.ec2.blockdevicemapping import BlockDeviceType + from boto.ec2.blockdevicemapping import BlockDeviceMapping + from boto.exception import EC2ResponseError + from boto import connect_ec2_endpoint + from boto import connect_vpc +except ImportError: + pass # Taken care of by ec2.HAS_BOTO + +from ansible.module_utils.six import get_function_code +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_bytes +from ansible.module_utils._text import to_text + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import HAS_BOTO +from ..module_utils.ec2 import ec2_connect 
+from ..module_utils.ec2 import get_aws_connection_info + + +def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None): + + # get reservations for instances that match tag(s) and are in the desired state + state = module.params.get('state') + if state not in ['running', 'stopped']: + state = None + reservations = get_reservations(module, ec2, vpc, tags=count_tag, state=state, zone=zone) + + instances = [] + for res in reservations: + if hasattr(res, 'instances'): + for inst in res.instances: + if inst.state == 'terminated' or inst.state == 'shutting-down': + continue + instances.append(inst) + + return reservations, instances + + +def _set_none_to_blank(dictionary): + result = dictionary + for k in result: + if isinstance(result[k], dict): + result[k] = _set_none_to_blank(result[k]) + elif not result[k]: + result[k] = "" + return result + + +def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None): + # TODO: filters do not work with tags that have underscores + filters = dict() + + vpc_subnet_id = module.params.get('vpc_subnet_id') + vpc_id = None + if vpc_subnet_id: + filters.update({"subnet-id": vpc_subnet_id}) + if vpc: + vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id + + if vpc_id: + filters.update({"vpc-id": vpc_id}) + + if tags is not None: + + if isinstance(tags, str): + try: + tags = literal_eval(tags) + except Exception: + pass + + # if not a string type, convert and make sure it's a text string + if isinstance(tags, int): + tags = to_text(tags) + + # if string, we only care that a tag of that name exists + if isinstance(tags, str): + filters.update({"tag-key": tags}) + + # if list, append each item to filters + if isinstance(tags, list): + for x in tags: + if isinstance(x, dict): + x = _set_none_to_blank(x) + filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items())) + else: + filters.update({"tag-key": x}) + + # if dict, add the key and value to the filter + if isinstance(tags, dict): + tags = _set_none_to_blank(tags) + filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items())) + + # lets check to see if the filters dict is empty, if so then stop + if not filters: + module.fail_json(msg="Filters based on tag is empty => tags: %s" % (tags)) + + if state: + # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api + filters.update({'instance-state-name': state}) + + if zone: + filters.update({'availability-zone': zone}) + + if module.params.get('id'): + filters['client-token'] = module.params['id'] + + results = ec2.get_all_instances(filters=filters) + + return results + + +def get_instance_info(inst): + """ + Retrieves instance information from an instance + ID and returns it as a dictionary + """ + instance_info = {'id': inst.id, + 'ami_launch_index': inst.ami_launch_index, + 'private_ip': inst.private_ip_address, + 'private_dns_name': inst.private_dns_name, + 'public_ip': inst.ip_address, + 'dns_name': inst.dns_name, + 'public_dns_name': inst.public_dns_name, + 'state_code': inst.state_code, + 'architecture': inst.architecture, + 'image_id': inst.image_id, + 'key_name': inst.key_name, + 'placement': inst.placement, + 'region': inst.placement[:-1], + 'kernel': inst.kernel, + 'ramdisk': inst.ramdisk, + 'launch_time': inst.launch_time, + 'instance_type': inst.instance_type, + 'root_device_type': inst.root_device_type, + 'root_device_name': inst.root_device_name, + 'state': inst.state, + 'hypervisor': inst.hypervisor, + 'tags': inst.tags, + 'groups': dict((group.id, 
group.name) for group in inst.groups),
+                     }
+    try:
+        instance_info['virtualization_type'] = getattr(inst, 'virtualization_type')
+    except AttributeError:
+        instance_info['virtualization_type'] = None
+
+    try:
+        instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized')
+    except AttributeError:
+        instance_info['ebs_optimized'] = False
+
+    try:
+        bdm_dict = {}
+        bdm = getattr(inst, 'block_device_mapping')
+        for device_name in bdm.keys():
+            bdm_dict[device_name] = {
+                'status': bdm[device_name].status,
+                'volume_id': bdm[device_name].volume_id,
+                'delete_on_termination': bdm[device_name].delete_on_termination
+            }
+        instance_info['block_device_mapping'] = bdm_dict
+    except AttributeError:
+        instance_info['block_device_mapping'] = False
+
+    try:
+        instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
+    except AttributeError:
+        instance_info['tenancy'] = 'default'
+
+    return instance_info
+
+
+def boto_supports_associate_public_ip_address(ec2):
+    """
+    Check if the Boto library has associate_public_ip_address in the
+    NetworkInterfaceSpecification class. Added in Boto 2.13.0.
+
+    ec2: authenticated ec2 connection object
+
+    Returns:
+        True if the Boto library accepts an associate_public_ip_address argument, else False
+    """
+
+    try:
+        network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
+        getattr(network_interface, "associate_public_ip_address")
+        return True
+    except AttributeError:
+        return False
+
+
+def boto_supports_profile_name_arg(ec2):
+    """
+    Check if the Boto library supports the instance_profile_name argument, added in Boto 2.5.0.
+
+    ec2: authenticated ec2 connection object
+
+    Returns:
+        True if the Boto library accepts an instance_profile_name argument, else False
+    """
+    run_instances_method = getattr(ec2, 'run_instances')
+    return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames
+
+
+def boto_supports_volume_encryption():
+    """
+    Check if the Boto library supports encryption of EBS volumes (added in 2.29.0).
+
+    Returns:
+        True if the installed Boto version is 2.29.0 or later, else False
+    """
+    return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
+
+
+def create_block_device(module, ec2, volume):
+    # Not aware of a way to determine this programmatically
+    # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
+    MAX_IOPS_TO_SIZE_RATIO = 30
+
+    volume_type = volume.get('volume_type')
+
+    if 'snapshot' not in volume and 'ephemeral' not in volume:
+        if 'volume_size' not in volume:
+            module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
+    if 'snapshot' in volume:
+        if volume_type == 'io1' and 'iops' not in volume:
+            module.fail_json(msg='io1 volumes must have an iops value set')
+        if 'iops' in volume:
+            snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
+            size = volume.get('volume_size', snapshot.volume_size)
+            if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * int(size):
+                module.fail_json(msg='IOPS must be at most %d times the volume size' % MAX_IOPS_TO_SIZE_RATIO)
+    if 'ephemeral' in volume:
+        if 'snapshot' in volume:
+            module.fail_json(msg='Cannot set both ephemeral and snapshot')
+    if boto_supports_volume_encryption():
+        return BlockDeviceType(snapshot_id=volume.get('snapshot'),
+                               ephemeral_name=volume.get('ephemeral'),
+                               size=volume.get('volume_size'),
+                               volume_type=volume_type,
+                               delete_on_termination=volume.get('delete_on_termination', False),
+                               iops=volume.get('iops'),
+                               encrypted=volume.get('encrypted', None))
+    else:
+        return BlockDeviceType(snapshot_id=volume.get('snapshot'),
+                               ephemeral_name=volume.get('ephemeral'),
+                               size=volume.get('volume_size'),
+                               volume_type=volume_type,
+                               delete_on_termination=volume.get('delete_on_termination', False),
+                               iops=volume.get('iops'))
+
+
+def boto_supports_param_in_spot_request(ec2, param):
+    """
+    Check if the Boto library has a <param> in its request_spot_instances() method. For example,
+    the placement_group parameter wasn't added until Boto 2.3.0.
+
+    ec2: authenticated ec2 connection object
+
+    Returns:
+        True if the Boto library has the named param as an argument on the request_spot_instances method, else False
+    """
+    method = getattr(ec2, 'request_spot_instances')
+    return param in get_function_code(method).co_varnames
+
+
+def await_spot_requests(module, ec2, spot_requests, count):
+    """
+    Wait for a group of spot requests to be fulfilled, or fail.
+
+    module: Ansible module object
+    ec2: authenticated ec2 connection object
+    spot_requests: list of boto.ec2.spotinstancerequest.SpotInstanceRequest
+        objects returned by ec2.request_spot_instances
+    count: Total number of instances to be created by the spot requests
+
+    Returns:
+        list of instance IDs created by the spot request(s)
+    """
+    spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
+    wait_complete = time.time() + spot_wait_timeout
+
+    spot_req_inst_ids = dict()
+    while time.time() < wait_complete:
+        reqs = ec2.get_all_spot_instance_requests()
+        for sirb in spot_requests:
+            if sirb.id in spot_req_inst_ids:
+                continue
+            for sir in reqs:
+                if sir.id != sirb.id:
+                    continue  # this is not our spot instance
+                if sir.instance_id is not None:
+                    spot_req_inst_ids[sirb.id] = sir.instance_id
+                elif sir.state == 'open':
+                    continue  # still waiting, nothing to do here
+                elif sir.state == 'active':
+                    continue  # instance is created already, nothing to do here
+                elif sir.state == 'failed':
+                    module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
+                        sir.id, sir.status.code, sir.fault.code, sir.fault.message))
+                elif sir.state == 'cancelled':
+                    module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
+                elif sir.state == 'closed':
+                    # The instance is terminating or marked for termination.
+                    # This may be intentional on the part of the operator,
+                    # or it may have been terminated by AWS due to capacity,
+                    # price, or group constraints. In this case, we'll fail
+                    # the module if the reason for the state is anything
+                    # other than termination by user.
Codes are documented at + # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html + if sir.status.code == 'instance-terminated-by-user': + # do nothing, since the user likely did this on purpose + pass + else: + spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s" + module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message)) + + if len(spot_req_inst_ids) < count: + time.sleep(5) + else: + return list(spot_req_inst_ids.values()) + module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime()) + + +def enforce_count(module, ec2, vpc): + + exact_count = module.params.get('exact_count') + count_tag = module.params.get('count_tag') + zone = module.params.get('zone') + + # fail here if the exact count was specified without filtering + # on a tag, as this may lead to a undesired removal of instances + if exact_count and count_tag is None: + module.fail_json(msg="you must use the 'count_tag' option with exact_count") + + reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone) + + changed = None + checkmode = False + instance_dict_array = [] + changed_instance_ids = None + + if len(instances) == exact_count: + changed = False + elif len(instances) < exact_count: + changed = True + to_create = exact_count - len(instances) + if not checkmode: + (instance_dict_array, changed_instance_ids, changed) \ + = create_instances(module, ec2, vpc, override_count=to_create) + + for inst in instance_dict_array: + instances.append(inst) + elif len(instances) > exact_count: + changed = True + to_remove = len(instances) - exact_count + if not checkmode: + all_instance_ids = sorted([x.id for x in instances]) + remove_ids = all_instance_ids[0:to_remove] + + instances = [x for x in instances if x.id not in remove_ids] + + (changed, instance_dict_array, changed_instance_ids) \ + = terminate_instances(module, ec2, remove_ids) + terminated_list = [] + for inst in instance_dict_array: + inst['state'] = "terminated" + terminated_list.append(inst) + instance_dict_array = terminated_list + + # ensure all instances are dictionaries + all_instances = [] + for inst in instances: + + if not isinstance(inst, dict): + warn_if_public_ip_assignment_changed(module, inst) + inst = get_instance_info(inst) + all_instances.append(inst) + + return (all_instances, instance_dict_array, changed_instance_ids, changed) + + +def create_instances(module, ec2, vpc, override_count=None): + """ + Creates new instances + + module : AnsibleAWSModule object + ec2: authenticated ec2 connection object + + Returns: + A list of dictionaries with instance information + about the instances that were launched + """ + + key_name = module.params.get('key_name') + id = module.params.get('id') + group_name = module.params.get('group') + group_id = module.params.get('group_id') + zone = module.params.get('zone') + instance_type = module.params.get('instance_type') + tenancy = module.params.get('tenancy') + spot_price = module.params.get('spot_price') + spot_type = module.params.get('spot_type') + image = module.params.get('image') + if override_count: + count = override_count + else: + count = module.params.get('count') + monitoring = module.params.get('monitoring') + kernel = module.params.get('kernel') + ramdisk = module.params.get('ramdisk') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + spot_wait_timeout = int(module.params.get('spot_wait_timeout')) + placement_group = 
module.params.get('placement_group')
+    user_data = module.params.get('user_data')
+    instance_tags = module.params.get('instance_tags')
+    vpc_subnet_id = module.params.get('vpc_subnet_id')
+    assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+    private_ip = module.params.get('private_ip')
+    instance_profile_name = module.params.get('instance_profile_name')
+    volumes = module.params.get('volumes')
+    ebs_optimized = module.params.get('ebs_optimized')
+    exact_count = module.params.get('exact_count')
+    count_tag = module.params.get('count_tag')
+    source_dest_check = module.boolean(module.params.get('source_dest_check'))
+    termination_protection = module.boolean(module.params.get('termination_protection'))
+    network_interfaces = module.params.get('network_interfaces')
+    spot_launch_group = module.params.get('spot_launch_group')
+    instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')
+
+    vpc_id = None
+    if vpc_subnet_id:
+        if not vpc:
+            module.fail_json(msg="region must be specified")
+        else:
+            vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
+    else:
+        vpc_id = None
+
+    try:
+        # Here we try to look up the group id from the security group name, if group is set.
+        if group_name:
+            if vpc_id:
+                grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
+            else:
+                grp_details = ec2.get_all_security_groups()
+            if isinstance(group_name, string_types):
+                group_name = [group_name]
+            unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
+            if len(unmatched) > 0:
+                module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
+            group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
+        # Now we try to look up the group id, testing that the group exists.
+        elif group_id:
+            # wrap the group_id in a list if it's not one already
+            if isinstance(group_id, string_types):
+                group_id = [group_id]
+            grp_details = ec2.get_all_security_groups(group_ids=group_id)
+            group_name = [grp_item.name for grp_item in grp_details]
+    except boto.exception.NoAuthHandlerFound as e:
+        module.fail_json_aws(e, msg='Unable to authenticate to AWS')
+
+    # Look up any instances that match our run id.
+
+    running_instances = []
+    count_remaining = int(count)
+
+    if id is not None:
+        filter_dict = {'client-token': id, 'instance-state-name': 'running'}
+        previous_reservations = ec2.get_all_instances(None, filter_dict)
+        for res in previous_reservations:
+            for prev_instance in res.instances:
+                running_instances.append(prev_instance)
+        count_remaining = count_remaining - len(running_instances)
+
+    # Both min_count and max_count are set to the requested count, so the
+    # launch request is explicit: we get exactly 'count' instances or the
+    # request fails.
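+    # Illustrative sketch (not part of the module flow): with id='web-2021',
+    # count=3 and one instance already carrying that client token, the lookup
+    # above, roughly
+    #     ec2.get_all_instances(None, {'client-token': 'web-2021',
+    #                                  'instance-state-name': 'running'})
+    # leaves count_remaining == 2, so only the shortfall is launched below.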
+ + if count_remaining == 0: + changed = False + else: + changed = True + try: + params = {'image_id': image, + 'key_name': key_name, + 'monitoring_enabled': monitoring, + 'placement': zone, + 'instance_type': instance_type, + 'kernel_id': kernel, + 'ramdisk_id': ramdisk} + if user_data is not None: + params['user_data'] = to_bytes(user_data, errors='surrogate_or_strict') + + if ebs_optimized: + params['ebs_optimized'] = ebs_optimized + + # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request + if not spot_price: + params['tenancy'] = tenancy + + if boto_supports_profile_name_arg(ec2): + params['instance_profile_name'] = instance_profile_name + else: + if instance_profile_name is not None: + module.fail_json( + msg="instance_profile_name parameter requires Boto version 2.5.0 or higher") + + if assign_public_ip is not None: + if not boto_supports_associate_public_ip_address(ec2): + module.fail_json( + msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.") + elif not vpc_subnet_id: + module.fail_json( + msg="assign_public_ip only available with vpc_subnet_id") + + else: + if private_ip: + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + subnet_id=vpc_subnet_id, + private_ip_address=private_ip, + groups=group_id, + associate_public_ip_address=assign_public_ip) + else: + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + subnet_id=vpc_subnet_id, + groups=group_id, + associate_public_ip_address=assign_public_ip) + interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) + params['network_interfaces'] = interfaces + else: + if network_interfaces: + if isinstance(network_interfaces, string_types): + network_interfaces = [network_interfaces] + interfaces = [] + for i, network_interface_id in enumerate(network_interfaces): + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + network_interface_id=network_interface_id, + device_index=i) + interfaces.append(interface) + params['network_interfaces'] = \ + boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces) + else: + params['subnet_id'] = vpc_subnet_id + if vpc_subnet_id: + params['security_group_ids'] = group_id + else: + params['security_groups'] = group_name + + if volumes: + bdm = BlockDeviceMapping() + for volume in volumes: + if 'device_name' not in volume: + module.fail_json(msg='Device name must be set for volume') + # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0 + # to be a signal not to create this volume + if 'volume_size' not in volume or int(volume['volume_size']) > 0: + bdm[volume['device_name']] = create_block_device(module, ec2, volume) + + params['block_device_map'] = bdm + + # check to see if we're using spot pricing first before starting instances + if not spot_price: + if assign_public_ip is not None and private_ip: + params.update( + dict( + min_count=count_remaining, + max_count=count_remaining, + client_token=id, + placement_group=placement_group, + ) + ) + else: + params.update( + dict( + min_count=count_remaining, + max_count=count_remaining, + client_token=id, + placement_group=placement_group, + private_ip_address=private_ip, + ) + ) + + # For ordinary (not spot) instances, we can select 'stop' + # (the default) or 'terminate' here. 
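+                # (Spot requests, handled in the else branch below, are always
+                # terminated on shutdown; a 'stop' value is rejected there.)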
+                params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
+
+                try:
+                    res = ec2.run_instances(**params)
+                except boto.exception.EC2ResponseError as e:
+                    if (params['instance_initiated_shutdown_behavior'] != 'terminate' and
+                            "InvalidParameterCombination" == e.error_code):
+                        params['instance_initiated_shutdown_behavior'] = 'terminate'
+                        res = ec2.run_instances(**params)
+                    else:
+                        raise
+
+                instids = [i.id for i in res.instances]
+                while True:
+                    try:
+                        ec2.get_all_instances(instids)
+                        break
+                    except boto.exception.EC2ResponseError as e:
+                        if e.error_code == 'InvalidInstanceID.NotFound':
+                            # there's a race between starting an instance and being able to describe it
+                            continue
+                        else:
+                            module.fail_json_aws(e)
+
+                # The instances returned through ec2.run_instances above can be in
+                # terminated state due to idempotency. See commit 7f11c3d for a complete
+                # explanation.
+                terminated_instances = [
+                    str(instance.id) for instance in res.instances if instance.state == 'terminated'
+                ]
+                if terminated_instances:
+                    module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
+                                         "were created previously but have since been terminated - " +
+                                         "use a (possibly different) 'id' parameter")
+
+            else:
+                if private_ip:
+                    module.fail_json(
+                        msg='private_ip only available with on-demand (non-spot) instances')
+                if boto_supports_param_in_spot_request(ec2, 'placement_group'):
+                    params['placement_group'] = placement_group
+                elif placement_group:
+                    module.fail_json(
+                        msg="placement_group parameter requires Boto version 2.3.0 or higher.")
+
+                # You can't tell spot instances to 'stop'; they will always be
+                # 'terminate'd. For convenience, we'll ignore the latter value.
+                if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
+                    module.fail_json(
+                        msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
+
+                if spot_launch_group and isinstance(spot_launch_group, string_types):
+                    params['launch_group'] = spot_launch_group
+
+                params.update(dict(
+                    count=count_remaining,
+                    type=spot_type,
+                ))
+
+                # Set spot ValidUntil
+                # ValidUntil -> (timestamp). The end date of the request, in
+                # UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
+                utc_valid_until = (
+                    datetime.datetime.utcnow()
+                    + datetime.timedelta(seconds=spot_wait_timeout))
+                params['valid_until'] = utc_valid_until.strftime('%Y-%m-%dT%H:%M:%S.000Z')
+
+                res = ec2.request_spot_instances(spot_price, **params)
+
+                # Now we have to do the intermediate waiting
+                if wait:
+                    instids = await_spot_requests(module, ec2, res, count)
+                else:
+                    instids = []
+        except boto.exception.BotoServerError as e:
+            module.fail_json_aws(e, msg='Instance creation failed')
+
+    # wait here until the instances are up
+    num_running = 0
+    wait_timeout = time.time() + wait_timeout
+    res_list = ()
+    while wait_timeout > time.time() and num_running < len(instids):
+        try:
+            res_list = ec2.get_all_instances(instids)
+        except boto.exception.BotoServerError as e:
+            if e.error_code == 'InvalidInstanceID.NotFound':
+                time.sleep(1)
+                continue
+            else:
+                raise
+
+        num_running = 0
+        for res in res_list:
+            num_running += len([i for i in res.instances if i.state == 'running'])
+        if len(res_list) <= 0:
+            # got a bad response of some sort, possibly due to
+            # stale/cached data. Wait a second and then try again
+            time.sleep(1)
+            continue
+        if wait and num_running < len(instids):
+            time.sleep(5)
+        else:
+            break
+
+    if wait and wait_timeout <= time.time():
+        # waiting took too long
+        module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
+
+    # We do this after the loop ends so that we end up with one list
+    for res in res_list:
+        running_instances.extend(res.instances)
+
+    # sourceDestCheck is enabled by default by AWS
+    if source_dest_check is False:
+        for inst in running_instances:
+            inst.modify_attribute('sourceDestCheck', False)
+
+    # disableApiTermination is disabled by default by AWS
+    if termination_protection is True:
+        for inst in running_instances:
+            inst.modify_attribute('disableApiTermination', True)
+
+    # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
+    if instance_tags and instids:
+        try:
+            ec2.create_tags(instids, instance_tags)
+        except boto.exception.EC2ResponseError as e:
+            module.fail_json_aws(e, msg='Instance tagging failed')
+
+    instance_dict_array = []
+    created_instance_ids = []
+    for inst in running_instances:
+        inst.update()
+        d = get_instance_info(inst)
+        created_instance_ids.append(inst.id)
+        instance_dict_array.append(d)
+
+    return (instance_dict_array, created_instance_ids, changed)
+
+
+def terminate_instances(module, ec2, instance_ids):
+    """
+    Terminates a list of instances
+
+    module: Ansible module object
+    ec2: authenticated ec2 connection object
+    instance_ids: a list of instance IDs to terminate, e.g.
+      ['i-xxxxxx', ...]
+
+    Returns a tuple of (changed, instance_dict_array, terminated_instance_ids).
+    "changed" will be False if none of the instances were in a running or
+    stopped state to begin with.
+    """
+
+    # Whether to wait for termination to complete before returning
+    wait = module.params.get('wait')
+    wait_timeout = int(module.params.get('wait_timeout'))
+
+    changed = False
+    instance_dict_array = []
+
+    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+        module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+    terminated_instance_ids = []
+    for res in ec2.get_all_instances(instance_ids):
+        for inst in res.instances:
+            if inst.state == 'running' or inst.state == 'stopped':
+                terminated_instance_ids.append(inst.id)
+                instance_dict_array.append(get_instance_info(inst))
+                try:
+                    ec2.terminate_instances([inst.id])
+                except EC2ResponseError as e:
+                    module.fail_json_aws(e, msg='Unable to terminate instance {0}'.format(inst.id))
+                changed = True
+
+    # wait here until the instances are 'terminated'
+    if wait:
+        num_terminated = 0
+        wait_timeout = time.time() + wait_timeout
+        while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
+            response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
+                                             filters={'instance-state-name': 'terminated'})
+            try:
+                num_terminated = sum([len(res.instances) for res in response])
+            except Exception as e:
+                # got a bad response of some sort, possibly due to
+                # stale/cached data.
+                time.sleep(1)
+                continue
+
+            if num_terminated < len(terminated_instance_ids):
+                time.sleep(5)
+
+        # waiting took too long
+        if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
+            module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
+        # Let's get the current state of the instances after terminating - issue600
+        instance_dict_array = []
+        for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}):
+            for inst in res.instances:
+                instance_dict_array.append(get_instance_info(inst))
+
+    return (changed, instance_dict_array, terminated_instance_ids)
+
+
+def startstop_instances(module, ec2, instance_ids, state, instance_tags):
+    """
+    Starts or stops a list of existing instances
+
+    module: Ansible module object
+    ec2: authenticated ec2 connection object
+    instance_ids: a list of IDs of the instances to start or stop, e.g.
+      ['i-xxxxxx', ...]
+    instance_tags: A dict of tag keys and values in the form of
+      {key: value, ... }
+    state: Intended state ("running" or "stopped")
+
+    Returns a tuple of (changed, instance_dict_array, instance_ids) with
+    information about the instances started/stopped.
+
+    If no instance was able to change state,
+    "changed" will be set to False.
+
+    Note that if instance_ids and instance_tags are both non-empty,
+    this method will process the intersection of the two
+    """
+
+    wait = module.params.get('wait')
+    wait_timeout = int(module.params.get('wait_timeout'))
+    group_id = module.params.get('group_id')
+    group_name = module.params.get('group')
+    changed = False
+    instance_dict_array = []
+
+    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+        # Fail unless the user defined instance tags
+        if not instance_tags:
+            module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+    # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
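+    # For example, instance_tags={'Name': 'ExtraPower'} becomes the boto
+    # filter {'tag:Name': 'ExtraPower'}.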
+ # An empty filter does no filtering, so it's safe to pass it to the + # get_all_instances method even if the user did not specify instance_tags + filters = {} + if instance_tags: + for key, value in instance_tags.items(): + filters["tag:" + key] = value + + filters['instance-state-name'] = ["pending", "running", "stopping", "stopped"] + + if module.params.get('id'): + filters['client-token'] = module.params['id'] + # Check that our instances are not in the state we want to take + + # Check (and eventually change) instances attributes and instances state + existing_instances_array = [] + for res in ec2.get_all_instances(instance_ids, filters=filters): + for inst in res.instances: + + warn_if_public_ip_assignment_changed(module, inst) + + changed = (check_source_dest_attr(module, inst, ec2) or + check_termination_protection(module, inst) or changed) + + # Check security groups and if we're using ec2-vpc; ec2-classic security groups may not be modified + if inst.vpc_id and group_name: + grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id}) + if isinstance(group_name, string_types): + group_name = [group_name] + unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details) + if unmatched: + module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched)) + group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name] + elif inst.vpc_id and group_id: + if isinstance(group_id, string_types): + group_id = [group_id] + grp_details = ec2.get_all_security_groups(group_ids=group_id) + group_ids = [grp_item.id for grp_item in grp_details] + if inst.vpc_id and (group_name or group_id): + if set(sg.id for sg in inst.groups) != set(group_ids): + changed = inst.modify_attribute('groupSet', group_ids) + + # Check instance state + if inst.state != state: + instance_dict_array.append(get_instance_info(inst)) + try: + if state == 'running': + inst.start() + else: + inst.stop() + except EC2ResponseError as e: + module.fail_json_aws(e, 'Unable to change state for instance {0}'.format(inst.id)) + changed = True + existing_instances_array.append(inst.id) + + instance_ids = list(set(existing_instances_array + (instance_ids or []))) + # Wait for all the instances to finish starting or stopping + wait_timeout = time.time() + wait_timeout + while wait and wait_timeout > time.time(): + instance_dict_array = [] + matched_instances = [] + for res in ec2.get_all_instances(instance_ids): + for i in res.instances: + if i.state == state: + instance_dict_array.append(get_instance_info(i)) + matched_instances.append(i) + if len(matched_instances) < len(instance_ids): + time.sleep(5) + else: + break + + if wait and wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="wait for instances running timeout on %s" % time.asctime()) + + return (changed, instance_dict_array, instance_ids) + + +def restart_instances(module, ec2, instance_ids, state, instance_tags): + """ + Restarts a list of existing instances + + module: Ansible module object + ec2: authenticated ec2 connection object + instance_ids: The list of instances to start in the form of + [ {id: <inst-id>}, ..] + instance_tags: A dict of tag keys and values in the form of + {key: value, ... } + state: Intended state ("restarted") + + Returns a dictionary of instance information + about the instances. + + If the instance was not able to change state, + "changed" will be set to False. + + Wait will not apply here as this is a OS level operation. 
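+
+    For example, restart_instances(module, ec2, ['i-0123456789abcdef0'],
+    'restarted', {'Name': 'ExtraPower'}) only reboots instances that both
+    appear in the id list and carry the Name=ExtraPower tag (a hypothetical
+    id, shown for illustration).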
+
+    Note that if instance_ids and instance_tags are both non-empty,
+    this method will process the intersection of the two.
+    """
+
+    changed = False
+    instance_dict_array = []
+
+    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+        # Fail unless the user defined instance tags
+        if not instance_tags:
+            module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+    # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
+    # An empty filter does no filtering, so it's safe to pass it to the
+    # get_all_instances method even if the user did not specify instance_tags
+    filters = {}
+    if instance_tags:
+        for key, value in instance_tags.items():
+            filters["tag:" + key] = value
+    if module.params.get('id'):
+        filters['client-token'] = module.params['id']
+
+    # Check (and eventually change) instance attributes and instance state
+    for res in ec2.get_all_instances(instance_ids, filters=filters):
+        for inst in res.instances:
+
+            warn_if_public_ip_assignment_changed(module, inst)
+
+            changed = (check_source_dest_attr(module, inst, ec2) or
+                       check_termination_protection(module, inst) or changed)
+
+            # Check instance state; 'restarted' never matches an actual EC2
+            # state, so every matched instance is rebooted
+            if inst.state != state:
+                instance_dict_array.append(get_instance_info(inst))
+                try:
+                    inst.reboot()
+                except EC2ResponseError as e:
+                    module.fail_json_aws(e, msg='Unable to change state for instance {0}'.format(inst.id))
+                changed = True
+
+    return (changed, instance_dict_array, instance_ids)
+
+
+def check_termination_protection(module, inst):
+    """
+    Check the instance disableApiTermination attribute.
+
+    module: Ansible module object
+    inst: EC2 instance object
+
+    Returns: True if the attribute was changed, None otherwise
+    """
+
+    termination_protection = module.params.get('termination_protection')
+
+    if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
+        inst.modify_attribute('disableApiTermination', termination_protection)
+        return True
+
+
+def check_source_dest_attr(module, inst, ec2):
+    """
+    Check the instance sourceDestCheck attribute.
+
+    module: Ansible module object
+    inst: EC2 instance object
+
+    Returns: True if the attribute was changed, None otherwise
+    """
+
+    source_dest_check = module.params.get('source_dest_check')
+
+    if source_dest_check is not None:
+        try:
+            if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
+                inst.modify_attribute('sourceDestCheck', source_dest_check)
+                return True
+        except boto.exception.EC2ResponseError as exc:
+            # instances with more than one Elastic Network Interface will
+            # fail, because they have the sourceDestCheck attribute defined
+            # per-interface
+            if exc.code == 'InvalidInstanceID':
+                for interface in inst.interfaces:
+                    if interface.source_dest_check != source_dest_check:
+                        ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
+                        return True
+            else:
+                module.fail_json_aws(exc, msg='Failed to handle source_dest_check state for instance {0}'.format(inst.id))
+
+
+def warn_if_public_ip_assignment_changed(module, instance):
+    # This is a non-modifiable attribute.
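+    # Whether an instance gets a public IP is decided at launch time by the
+    # network interface specification built in create_instances(); for an
+    # existing instance the module can only warn about a mismatch.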
+ assign_public_ip = module.params.get('assign_public_ip') + + # Check that public ip assignment is the same and warn if not + public_dns_name = getattr(instance, 'public_dns_name', None) + if (assign_public_ip or public_dns_name) and (not public_dns_name or assign_public_ip is False): + module.warn("Unable to modify public ip assignment to {0} for instance {1}. " + "Whether or not to assign a public IP is determined during instance creation.".format(assign_public_ip, instance.id)) + + +def main(): + argument_spec = dict( + key_name=dict(aliases=['keypair']), + id=dict(), + group=dict(type='list', elements='str', aliases=['groups']), + group_id=dict(type='list', elements='str'), + zone=dict(aliases=['aws_zone', 'ec2_zone']), + instance_type=dict(aliases=['type']), + spot_price=dict(), + spot_type=dict(default='one-time', choices=["one-time", "persistent"]), + spot_launch_group=dict(), + image=dict(), + kernel=dict(), + count=dict(type='int', default='1'), + monitoring=dict(type='bool', default=False), + ramdisk=dict(), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + spot_wait_timeout=dict(type='int', default=600), + placement_group=dict(), + user_data=dict(), + instance_tags=dict(type='dict'), + vpc_subnet_id=dict(), + assign_public_ip=dict(type='bool'), + private_ip=dict(), + instance_profile_name=dict(), + instance_ids=dict(type='list', elements='str', aliases=['instance_id']), + source_dest_check=dict(type='bool', default=None), + termination_protection=dict(type='bool', default=None), + state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']), + instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']), + exact_count=dict(type='int', default=None), + count_tag=dict(type='raw'), + volumes=dict(type='list', elements='dict',), + ebs_optimized=dict(type='bool', default=False), + tenancy=dict(default='default', choices=['default', 'dedicated']), + network_interfaces=dict(type='list', elements='str', aliases=['network_interface']) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + check_boto3=False, + mutually_exclusive=[ + # Can be uncommented when we finish the deprecation cycle. + # ['group', 'group_id'], + ['exact_count', 'count'], + ['exact_count', 'state'], + ['exact_count', 'instance_ids'], + ['network_interfaces', 'assign_public_ip'], + ['network_interfaces', 'group'], + ['network_interfaces', 'group_id'], + ['network_interfaces', 'private_ip'], + ['network_interfaces', 'vpc_subnet_id'], + ], + ) + + if module.params.get('group') and module.params.get('group_id'): + module.deprecate( + msg='Support for passing both group and group_id has been deprecated. 
' 'Currently group_id is ignored, in the future passing both will result in an error',
+            date='2022-06-01', collection_name='amazon.aws')
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module')
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+        if module.params.get('region') or not module.params.get('ec2_url'):
+            ec2 = ec2_connect(module)
+        elif module.params.get('ec2_url'):
+            ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
+
+        if 'region' not in aws_connect_kwargs:
+            aws_connect_kwargs['region'] = ec2.region
+
+        vpc = connect_vpc(**aws_connect_kwargs)
+    except boto.exception.NoAuthHandlerFound as e:
+        module.fail_json_aws(e, msg='Failed to get connection')
+
+    tagged_instances = []
+
+    state = module.params['state']
+
+    if state == 'absent':
+        instance_ids = module.params['instance_ids']
+        if not instance_ids:
+            module.fail_json(msg='instance_ids list is required for absent state')
+
+        (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
+
+    elif state in ('running', 'stopped'):
+        instance_ids = module.params.get('instance_ids')
+        instance_tags = module.params.get('instance_tags')
+        if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+            module.fail_json(msg='state=%s requires instance_ids (a list) or instance_tags (a dict), got: %s' % (state, instance_ids))
+
+        (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
+
+    elif state == 'restarted':
+        instance_ids = module.params.get('instance_ids')
+        instance_tags = module.params.get('instance_tags')
+        if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+            module.fail_json(msg='state=%s requires instance_ids (a list) or instance_tags (a dict), got: %s' % (state, instance_ids))
+
+        (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
+
+    elif state == 'present':
+        # Changed is always set to true when provisioning new instances
+        if not module.params.get('image'):
+            module.fail_json(msg='image parameter is required for new instance')
+
+        if module.params.get('exact_count') is None:
+            (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
+        else:
+            (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
+
+    # Always return instances in the same order
+    if new_instance_ids:
+        new_instance_ids.sort()
+    if instance_dict_array:
+        instance_dict_array.sort(key=lambda x: x['id'])
+    if tagged_instances:
+        tagged_instances.sort(key=lambda x: x['id'])
+
+    module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
new file mode 100644
index 00000000..86364f78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
@@ -0,0 +1,761 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami
+version_added: 1.0.0
+short_description: Create or destroy an image (AMI) in ec2
+description: + - Registers or deregisters ec2 images. +options: + instance_id: + description: + - Instance ID to create the AMI from. + type: str + name: + description: + - The name of the new AMI. + type: str + architecture: + description: + - The target architecture of the image to register + default: "x86_64" + type: str + kernel_id: + description: + - The target kernel id of the image to register. + type: str + virtualization_type: + description: + - The virtualization type of the image to register. + default: "hvm" + type: str + root_device_name: + description: + - The root device name of the image to register. + type: str + wait: + description: + - Wait for the AMI to be in state 'available' before returning. + default: false + type: bool + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 1200 + type: int + state: + description: + - Register or deregister an AMI. + default: 'present' + choices: [ "absent", "present" ] + type: str + description: + description: + - Human-readable string describing the contents and purpose of the AMI. + type: str + no_reboot: + description: + - Flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the + responsibility of maintaining file system integrity is left to the owner of the instance. + default: false + type: bool + image_id: + description: + - Image ID to be deregistered. + type: str + device_mapping: + description: + - List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters). + type: list + elements: dict + suboptions: + device_name: + type: str + description: + - The device name. For example C(/dev/sda). + required: yes + aliases: ['DeviceName'] + virtual_name: + type: str + description: + - The virtual name for the device. + - See the AWS documentation for more detail U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html). + - Alias C(VirtualName) has been deprecated and will be removed after 2022-06-01. + aliases: ['VirtualName'] + no_device: + type: bool + description: + - Suppresses the specified device included in the block device mapping of the AMI. + - Alias C(NoDevice) has been deprecated and will be removed after 2022-06-01. + aliases: ['NoDevice'] + volume_type: + type: str + description: The volume type. Defaults to C(gp2) when not set. + delete_on_termination: + type: bool + description: Whether the device should be automatically deleted when the Instance is terminated. + snapshot_id: + type: str + description: The ID of the Snapshot. + iops: + type: int + description: When using an C(io1) I(volume_type) this sets the number of IOPS provisioned for the volume + encrypted: + type: bool + description: Whether the volume should be encrypted. + volume_size: + aliases: ['size'] + type: int + description: The size of the volume (in GiB) + delete_snapshot: + description: + - Delete snapshots when deregistering the AMI. + default: false + type: bool + tags: + description: + - A dictionary of tags to add to the new image; '{"key":"value"}' and '{"key":"value","key":"value"}' + type: dict + purge_tags: + description: Whether to remove existing tags that aren't passed in the C(tags) parameter + default: false + type: bool + launch_permissions: + description: + - Users and groups that should be able to launch the AMI. Expects dictionary with a key of user_ids and/or group_names. user_ids should + be a list of account ids. 
group_name should be a list of groups, "all" is the only acceptable value currently. + - You must pass all desired launch permissions if you wish to modify existing launch permissions (passing just groups will remove all users) + type: dict + image_location: + description: + - The s3 location of an image to use for the AMI. + type: str + enhanced_networking: + description: + - A boolean representing whether enhanced networking with ENA is enabled or not. + type: bool + billing_products: + description: + - A list of valid billing codes. To be used with valid accounts by aws marketplace vendors. + type: list + elements: str + ramdisk_id: + description: + - The ID of the RAM disk. + type: str + sriov_net_support: + description: + - Set to simple to enable enhanced networking with the Intel 82599 Virtual Function interface for the AMI and any instances that you launch from the AMI. + type: str +author: + - "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>" + - "Constantin Bugneac (@Constantin07) <constantin.bugneac@endava.com>" + - "Ross Williams (@gunzy83) <gunzy83au@gmail.com>" + - "Willem van Ketwich (@wilvk) <willvk@gmail.com>" +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +# Thank you to iAcquire for sponsoring development of this module. + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Basic AMI Creation + amazon.aws.ec2_ami: + instance_id: i-xxxxxx + wait: yes + name: newtest + tags: + Name: newtest + Service: TestService + +- name: Basic AMI Creation, without waiting + amazon.aws.ec2_ami: + instance_id: i-xxxxxx + wait: no + name: newtest + +- name: AMI Registration from EBS Snapshot + amazon.aws.ec2_ami: + name: newtest + state: present + architecture: x86_64 + virtualization_type: hvm + root_device_name: /dev/xvda + device_mapping: + - device_name: /dev/xvda + volume_size: 8 + snapshot_id: snap-xxxxxxxx + delete_on_termination: true + volume_type: gp2 + +- name: AMI Creation, with a custom root-device size and another EBS attached + amazon.aws.ec2_ami: + instance_id: i-xxxxxx + name: newtest + device_mapping: + - device_name: /dev/sda1 + size: XXX + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdb + size: YYY + delete_on_termination: false + volume_type: gp2 + +- name: AMI Creation, excluding a volume attached at /dev/sdb + amazon.aws.ec2_ami: + instance_id: i-xxxxxx + name: newtest + device_mapping: + - device_name: /dev/sda1 + size: XXX + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdb + no_device: yes + +- name: Deregister/Delete AMI (keep associated snapshots) + amazon.aws.ec2_ami: + image_id: "{{ instance.image_id }}" + delete_snapshot: False + state: absent + +- name: Deregister AMI (delete associated snapshots too) + amazon.aws.ec2_ami: + image_id: "{{ instance.image_id }}" + delete_snapshot: True + state: absent + +- name: Update AMI Launch Permissions, making it public + amazon.aws.ec2_ami: + image_id: "{{ instance.image_id }}" + state: present + launch_permissions: + group_names: ['all'] + +- name: Allow AMI to be launched by another account + amazon.aws.ec2_ami: + image_id: "{{ instance.image_id }}" + state: present + launch_permissions: + user_ids: ['123456789012'] +''' + +RETURN = ''' +architecture: + description: Architecture of image. + returned: when AMI is created or already exists + type: str + sample: "x86_64" +block_device_mapping: + description: Block device mapping associated with image. 
+ returned: when AMI is created or already exists + type: dict + sample: { + "/dev/sda1": { + "delete_on_termination": true, + "encrypted": false, + "size": 10, + "snapshot_id": "snap-1a03b80e7", + "volume_type": "standard" + } + } +creationDate: + description: Creation date of image. + returned: when AMI is created or already exists + type: str + sample: "2015-10-15T22:43:44.000Z" +description: + description: Description of image. + returned: when AMI is created or already exists + type: str + sample: "nat-server" +hypervisor: + description: Type of hypervisor. + returned: when AMI is created or already exists + type: str + sample: "xen" +image_id: + description: ID of the image. + returned: when AMI is created or already exists + type: str + sample: "ami-1234abcd" +is_public: + description: Whether image is public. + returned: when AMI is created or already exists + type: bool + sample: false +launch_permission: + description: Permissions allowing other accounts to access the AMI. + returned: when AMI is created or already exists + type: list + sample: + - group: "all" +location: + description: Location of image. + returned: when AMI is created or already exists + type: str + sample: "315210894379/nat-server" +name: + description: AMI name of image. + returned: when AMI is created or already exists + type: str + sample: "nat-server" +ownerId: + description: Owner of image. + returned: when AMI is created or already exists + type: str + sample: "435210894375" +platform: + description: Platform of image. + returned: when AMI is created or already exists + type: str + sample: null +root_device_name: + description: Root device name of image. + returned: when AMI is created or already exists + type: str + sample: "/dev/sda1" +root_device_type: + description: Root device type of image. + returned: when AMI is created or already exists + type: str + sample: "ebs" +state: + description: State of image. + returned: when AMI is created or already exists + type: str + sample: "available" +tags: + description: A dictionary of tags assigned to image. + returned: when AMI is created or already exists + type: dict + sample: { + "Env": "devel", + "Name": "nat-server" + } +virtualization_type: + description: Image virtualization type. + returned: when AMI is created or already exists + type: str + sample: "hvm" +snapshots_deleted: + description: A list of snapshot ids deleted after deregistering image. 
+  returned: after AMI is deregistered, if I(delete_snapshot=true)
+  type: list
+  sample: [
+    "snap-fbcccb8f",
+    "snap-cfe7cdb4"
+  ]
+'''
+
+import time
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import compare_aws_tags
+from ..module_utils.waiters import get_waiter
+
+
+def get_block_device_mapping(image):
+    bdm_dict = dict()
+    if image is not None and image.get('block_device_mappings') is not None:
+        bdm = image.get('block_device_mappings')
+        for device in bdm:
+            device_name = device.get('device_name')
+            if 'ebs' in device:
+                ebs = device.get("ebs")
+                bdm_dict_item = {
+                    'size': ebs.get("volume_size"),
+                    'snapshot_id': ebs.get("snapshot_id"),
+                    'volume_type': ebs.get("volume_type"),
+                    'encrypted': ebs.get("encrypted"),
+                    'delete_on_termination': ebs.get("delete_on_termination")
+                }
+            elif 'virtual_name' in device:
+                bdm_dict_item = dict(virtual_name=device['virtual_name'])
+            else:
+                # Neither an EBS nor a virtual-name mapping; skip it so
+                # bdm_dict_item is never unbound or reused from a previous device.
+                continue
+            bdm_dict[device_name] = bdm_dict_item
+    return bdm_dict
+
+
+def get_ami_info(camel_image):
+    image = camel_dict_to_snake_dict(camel_image)
+    return dict(
+        image_id=image.get("image_id"),
+        state=image.get("state"),
+        architecture=image.get("architecture"),
+        block_device_mapping=get_block_device_mapping(image),
+        creationDate=image.get("creation_date"),
+        description=image.get("description"),
+        hypervisor=image.get("hypervisor"),
+        is_public=image.get("public"),
+        location=image.get("image_location"),
+        ownerId=image.get("owner_id"),
+        root_device_name=image.get("root_device_name"),
+        root_device_type=image.get("root_device_type"),
+        virtualization_type=image.get("virtualization_type"),
+        name=image.get("name"),
+        tags=boto3_tag_list_to_ansible_dict(image.get('tags')),
+        platform=image.get("platform"),
+        enhanced_networking=image.get("ena_support"),
+        image_owner_alias=image.get("image_owner_alias"),
+        image_type=image.get("image_type"),
+        kernel_id=image.get("kernel_id"),
+        product_codes=image.get("product_codes"),
+        ramdisk_id=image.get("ramdisk_id"),
+        sriov_net_support=image.get("sriov_net_support"),
+        state_reason=image.get("state_reason"),
+        launch_permissions=image.get('launch_permissions')
+    )
+
+
+def create_image(module, connection):
+    instance_id = module.params.get('instance_id')
+    name = module.params.get('name')
+    wait = module.params.get('wait')
+    wait_timeout = module.params.get('wait_timeout')
+    description = module.params.get('description')
+    architecture = module.params.get('architecture')
+    kernel_id = module.params.get('kernel_id')
+    root_device_name = module.params.get('root_device_name')
+    virtualization_type = module.params.get('virtualization_type')
+    no_reboot = module.params.get('no_reboot')
+    device_mapping = module.params.get('device_mapping')
+    tags = module.params.get('tags')
+    launch_permissions = module.params.get('launch_permissions')
+    image_location = module.params.get('image_location')
+    enhanced_networking = module.params.get('enhanced_networking')
+    billing_products = module.params.get('billing_products')
+    ramdisk_id = module.params.get('ramdisk_id')
+    sriov_net_support = module.params.get('sriov_net_support')
+
+    try:
+        params = {
+            'Name': name,
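+            # 'description' has an argument-spec default of '', so this key is always defined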
'Description': description + } + + block_device_mapping = None + + # Remove empty values injected by using options + if device_mapping: + block_device_mapping = [] + for device in device_mapping: + device = dict((k, v) for k, v in device.items() if v is not None) + device['Ebs'] = {} + device = rename_item_if_exists(device, 'device_name', 'DeviceName') + device = rename_item_if_exists(device, 'virtual_name', 'VirtualName') + device = rename_item_if_exists(device, 'no_device', 'NoDevice') + device = rename_item_if_exists(device, 'volume_type', 'VolumeType', 'Ebs') + device = rename_item_if_exists(device, 'snapshot_id', 'SnapshotId', 'Ebs') + device = rename_item_if_exists(device, 'delete_on_termination', 'DeleteOnTermination', 'Ebs') + device = rename_item_if_exists(device, 'size', 'VolumeSize', 'Ebs', attribute_type=int) + device = rename_item_if_exists(device, 'volume_size', 'VolumeSize', 'Ebs', attribute_type=int) + device = rename_item_if_exists(device, 'iops', 'Iops', 'Ebs') + device = rename_item_if_exists(device, 'encrypted', 'Encrypted', 'Ebs') + block_device_mapping.append(device) + if block_device_mapping: + params['BlockDeviceMappings'] = block_device_mapping + if instance_id: + params['InstanceId'] = instance_id + params['NoReboot'] = no_reboot + image_id = connection.create_image(aws_retry=True, **params).get('ImageId') + else: + if architecture: + params['Architecture'] = architecture + if virtualization_type: + params['VirtualizationType'] = virtualization_type + if image_location: + params['ImageLocation'] = image_location + if enhanced_networking: + params['EnaSupport'] = enhanced_networking + if billing_products: + params['BillingProducts'] = billing_products + if ramdisk_id: + params['RamdiskId'] = ramdisk_id + if sriov_net_support: + params['SriovNetSupport'] = sriov_net_support + if kernel_id: + params['KernelId'] = kernel_id + if root_device_name: + params['RootDeviceName'] = root_device_name + image_id = connection.register_image(aws_retry=True, **params).get('ImageId') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error registering image") + + if wait: + delay = 15 + max_attempts = wait_timeout // delay + waiter = get_waiter(connection, 'image_available') + waiter.wait(ImageIds=[image_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts)) + + if tags: + try: + connection.create_tags(aws_retry=True, Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags)) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error tagging image") + + if launch_permissions: + try: + params = dict(Attribute='LaunchPermission', ImageId=image_id, LaunchPermission=dict(Add=list())) + for group_name in launch_permissions.get('group_names', []): + params['LaunchPermission']['Add'].append(dict(Group=group_name)) + for user_id in launch_permissions.get('user_ids', []): + params['LaunchPermission']['Add'].append(dict(UserId=str(user_id))) + if params['LaunchPermission']['Add']: + connection.modify_image_attribute(aws_retry=True, **params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error setting launch permissions for image %s" % image_id) + + module.exit_json(msg="AMI creation operation complete.", changed=True, + **get_ami_info(get_image_by_id(module, connection, image_id))) + + +def deregister_image(module, connection): + image_id = module.params.get('image_id') + 
delete_snapshot = module.params.get('delete_snapshot') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + image = get_image_by_id(module, connection, image_id) + + if image is None: + module.exit_json(changed=False) + + # Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable. + snapshots = [] + if 'BlockDeviceMappings' in image: + for mapping in image.get('BlockDeviceMappings'): + snapshot_id = mapping.get('Ebs', {}).get('SnapshotId') + if snapshot_id is not None: + snapshots.append(snapshot_id) + + # When trying to re-deregister an already deregistered image it doesn't raise an exception, it just returns an object without image attributes. + if 'ImageId' in image: + try: + connection.deregister_image(aws_retry=True, ImageId=image_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error deregistering image") + else: + module.exit_json(msg="Image %s has already been deregistered." % image_id, changed=False) + + image = get_image_by_id(module, connection, image_id) + wait_timeout = time.time() + wait_timeout + + while wait and wait_timeout > time.time() and image is not None: + image = get_image_by_id(module, connection, image_id) + time.sleep(3) + + if wait and wait_timeout <= time.time(): + module.fail_json(msg="Timed out waiting for image to be deregistered.") + + exit_params = {'msg': "AMI deregister operation complete.", 'changed': True} + + if delete_snapshot: + for snapshot_id in snapshots: + try: + connection.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id) + # Don't error out if root volume snapshot was already deregistered as part of deregister_image + except is_boto3_error_code('InvalidSnapshot.NotFound'): + pass + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to delete snapshot.') + exit_params['snapshots_deleted'] = snapshots + + module.exit_json(**exit_params) + + +def update_image(module, connection, image_id): + launch_permissions = module.params.get('launch_permissions') + image = get_image_by_id(module, connection, image_id) + if image is None: + module.fail_json(msg="Image %s does not exist" % image_id, changed=False) + changed = False + + if launch_permissions is not None: + current_permissions = image['LaunchPermissions'] + + current_users = set(permission['UserId'] for permission in current_permissions if 'UserId' in permission) + desired_users = set(str(user_id) for user_id in launch_permissions.get('user_ids', [])) + current_groups = set(permission['Group'] for permission in current_permissions if 'Group' in permission) + desired_groups = set(launch_permissions.get('group_names', [])) + + to_add_users = desired_users - current_users + to_remove_users = current_users - desired_users + to_add_groups = desired_groups - current_groups + to_remove_groups = current_groups - desired_groups + + to_add = [dict(Group=group) for group in to_add_groups] + [dict(UserId=user_id) for user_id in to_add_users] + to_remove = [dict(Group=group) for group in to_remove_groups] + [dict(UserId=user_id) for user_id in to_remove_users] + + if to_add or to_remove: + try: + connection.modify_image_attribute(aws_retry=True, + ImageId=image_id, Attribute='launchPermission', + LaunchPermission=dict(Add=to_add, Remove=to_remove)) + changed = True + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error 
updating launch permissions of image %s" % image_id)
+
+    desired_tags = module.params.get('tags')
+    if desired_tags is not None:
+        current_tags = boto3_tag_list_to_ansible_dict(image.get('Tags'))
+        tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, purge_tags=module.params.get('purge_tags'))
+
+        if tags_to_remove:
+            try:
+                connection.delete_tags(aws_retry=True, Resources=[image_id], Tags=[dict(Key=tagkey) for tagkey in tags_to_remove])
+                changed = True
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                module.fail_json_aws(e, msg="Error removing tags")
+
+        if tags_to_add:
+            try:
+                connection.create_tags(aws_retry=True, Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags_to_add))
+                changed = True
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                module.fail_json_aws(e, msg="Error updating tags")
+
+    description = module.params.get('description')
+    if description and description != image['Description']:
+        try:
+            connection.modify_image_attribute(aws_retry=True, Attribute='Description', ImageId=image_id, Description=dict(Value=description))
+            changed = True
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Error setting description for image %s" % image_id)
+
+    if changed:
+        module.exit_json(msg="AMI updated.", changed=True,
+                         **get_ami_info(get_image_by_id(module, connection, image_id)))
+    else:
+        module.exit_json(msg="AMI not updated.", changed=False,
+                         **get_ami_info(get_image_by_id(module, connection, image_id)))
+
+
+def get_image_by_id(module, connection, image_id):
+    try:
+        try:
+            images_response = connection.describe_images(aws_retry=True, ImageIds=[image_id])
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Error retrieving image %s" % image_id)
+        images = images_response.get('Images')
+        no_images = len(images)
+        if no_images == 0:
+            return None
+        if no_images == 1:
+            result = images[0]
+            try:
+                result['LaunchPermissions'] = connection.describe_image_attribute(aws_retry=True, Attribute='launchPermission',
+                                                                                  ImageId=image_id)['LaunchPermissions']
+                result['ProductCodes'] = connection.describe_image_attribute(aws_retry=True, Attribute='productCodes',
+                                                                             ImageId=image_id)['ProductCodes']
+            except is_boto3_error_code('InvalidAMIID.Unavailable'):
+                pass
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                module.fail_json_aws(e, msg="Error retrieving image attributes for image %s" % image_id)
+            return result
+        module.fail_json(msg="Invalid number of images (%s) found for image_id: %s."
% (str(len(images)), image_id)) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error retrieving image by image_id") + + +def rename_item_if_exists(dict_object, attribute, new_attribute, child_node=None, attribute_type=None): + new_item = dict_object.get(attribute) + if new_item is not None: + if attribute_type is not None: + new_item = attribute_type(new_item) + if child_node is None: + dict_object[new_attribute] = new_item + else: + dict_object[child_node][new_attribute] = new_item + dict_object.pop(attribute) + return dict_object + + +def main(): + mapping_options = dict( + device_name=dict(type='str', aliases=['DeviceName'], required=True), + virtual_name=dict( + type='str', aliases=['VirtualName'], + deprecated_aliases=[dict(name='VirtualName', date='2022-06-01', collection_name='amazon.aws')]), + no_device=dict( + type='bool', aliases=['NoDevice'], + deprecated_aliases=[dict(name='NoDevice', date='2022-06-01', collection_name='amazon.aws')]), + volume_type=dict(type='str'), + delete_on_termination=dict(type='bool'), + snapshot_id=dict(type='str'), + iops=dict(type='int'), + encrypted=dict(type='bool'), + volume_size=dict(type='int', aliases=['size']), + ) + argument_spec = dict( + instance_id=dict(), + image_id=dict(), + architecture=dict(default='x86_64'), + kernel_id=dict(), + virtualization_type=dict(default='hvm'), + root_device_name=dict(), + delete_snapshot=dict(default=False, type='bool'), + name=dict(), + wait=dict(type='bool', default=False), + wait_timeout=dict(default=1200, type='int'), + description=dict(default=''), + no_reboot=dict(default=False, type='bool'), + state=dict(default='present', choices=['present', 'absent']), + device_mapping=dict(type='list', elements='dict', options=mapping_options), + tags=dict(type='dict'), + launch_permissions=dict(type='dict'), + image_location=dict(), + enhanced_networking=dict(type='bool'), + billing_products=dict(type='list', elements='str',), + ramdisk_id=dict(), + sriov_net_support=dict(), + purge_tags=dict(type='bool', default=False) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[ + ['state', 'absent', ['image_id']], + ] + ) + + # Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by + # the required_if for state=absent, so check manually instead + if not any([module.params['image_id'], module.params['name']]): + module.fail_json(msg="one of the following is required: name, image_id") + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + if module.params.get('state') == 'absent': + deregister_image(module, connection) + elif module.params.get('state') == 'present': + if module.params.get('image_id'): + update_image(module, connection, module.params.get('image_id')) + if not module.params.get('instance_id') and not module.params.get('device_mapping'): + module.fail_json(msg="The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image.") + create_image(module, connection) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_facts.py new file mode 100644 index 00000000..f2b52556 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_facts.py @@ -0,0 +1,287 @@ +#!/usr/bin/python +# Copyright: Ansible 
Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_ami_info +version_added: 1.0.0 +short_description: Gather information about ec2 AMIs +description: + - Gather information about ec2 AMIs + - This module was called C(amazon.aws.ec2_ami_facts) before Ansible 2.9. The usage did not change. +author: + - Prasad Katti (@prasadkatti) +requirements: [ boto3 ] +options: + image_ids: + description: One or more image IDs. + aliases: [image_id] + type: list + elements: str + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters. + - Filter names and values are case sensitive. + type: dict + owners: + description: + - Filter the images by the owner. Valid options are an AWS account ID, self, + or an AWS owner alias ( amazon | aws-marketplace | microsoft ). + aliases: [owner] + type: list + elements: str + executable_users: + description: + - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs). + aliases: [executable_user] + type: list + elements: str + describe_image_attributes: + description: + - Describe attributes (like launchPermission) of the images found. + default: no + type: bool + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: gather information about an AMI using ami-id + amazon.aws.ec2_ami_info: + image_ids: ami-5b488823 + +- name: gather information about all AMIs with tag key Name and value webapp + amazon.aws.ec2_ami_info: + filters: + "tag:Name": webapp + +- name: gather information about an AMI with 'AMI Name' equal to foobar + amazon.aws.ec2_ami_info: + filters: + name: foobar + +- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477) + amazon.aws.ec2_ami_info: + owners: 099720109477 + filters: + name: "ubuntu/images/ubuntu-zesty-17.04-*" +''' + +RETURN = ''' +images: + description: A list of images. + returned: always + type: list + elements: dict + contains: + architecture: + description: The architecture of the image. + returned: always + type: str + sample: x86_64 + block_device_mappings: + description: Any block device mapping entries. + returned: always + type: list + elements: dict + contains: + device_name: + description: The device name exposed to the instance. + returned: always + type: str + sample: /dev/sda1 + ebs: + description: EBS volumes + returned: always + type: complex + creation_date: + description: The date and time the image was created. + returned: always + type: str + sample: '2017-10-16T19:22:13.000Z' + description: + description: The description of the AMI. + returned: always + type: str + sample: '' + ena_support: + description: Whether enhanced networking with ENA is enabled. + returned: always + type: bool + sample: true + hypervisor: + description: The hypervisor type of the image. + returned: always + type: str + sample: xen + image_id: + description: The ID of the AMI. + returned: always + type: str + sample: ami-5b466623 + image_location: + description: The location of the AMI. 
+      returned: always
+      type: str
+      sample: 408466080000/Webapp
+    image_type:
+      description: The type of image.
+      returned: always
+      type: str
+      sample: machine
+    launch_permissions:
+      description: A list of AWS accounts that may launch the AMI.
+      returned: When image is owned by calling account and I(describe_image_attributes) is yes.
+      type: list
+      elements: dict
+      contains:
+        group:
+          description: A value of 'all' means the AMI is public.
+          type: str
+        user_id:
+          description: An AWS account ID with permissions to launch the AMI.
+          type: str
+      sample: [{"group": "all"}, {"user_id": "408466080000"}]
+    name:
+      description: The name of the AMI that was provided during image creation.
+      returned: always
+      type: str
+      sample: Webapp
+    owner_id:
+      description: The AWS account ID of the image owner.
+      returned: always
+      type: str
+      sample: '408466080000'
+    public:
+      description: Whether the image has public launch permissions.
+      returned: always
+      type: bool
+      sample: true
+    root_device_name:
+      description: The device name of the root device.
+      returned: always
+      type: str
+      sample: /dev/sda1
+    root_device_type:
+      description: The type of root device used by the AMI.
+      returned: always
+      type: str
+      sample: ebs
+    sriov_net_support:
+      description: Whether enhanced networking is enabled.
+      returned: always
+      type: str
+      sample: simple
+    state:
+      description: The current state of the AMI.
+      returned: always
+      type: str
+      sample: available
+    tags:
+      description: Any tags assigned to the image.
+      returned: always
+      type: dict
+    virtualization_type:
+      description: The type of virtualization of the AMI.
+      returned: always
+      type: str
+      sample: hvm
+'''
+
+try:
+    from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_ec2_images(ec2_client, module):
+
+    image_ids = module.params.get("image_ids")
+    owners = module.params.get("owners")
+    executable_users = module.params.get("executable_users")
+    filters = module.params.get("filters")
+    owner_param = []
+
+    # describe_images is *very* slow if you pass the `Owners`
+    # param (unless it's self), for some reason.
+    # Converting the owners to filters and removing from the
+    # owners param greatly speeds things up.
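+    # An illustrative sketch of the conversion performed below:
+    #   owners=['099720109477', 'self', 'amazon'] ends up as
+    #   filters={'owner-id': ['099720109477'], 'owner-alias': ['amazon']}
+    #   with owner_param=['self'].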
+ # Implementation based on aioue's suggestion in #24886 + for owner in owners: + if owner.isdigit(): + if 'owner-id' not in filters: + filters['owner-id'] = list() + filters['owner-id'].append(owner) + elif owner == 'self': + # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) + owner_param.append(owner) + else: + if 'owner-alias' not in filters: + filters['owner-alias'] = list() + filters['owner-alias'].append(owner) + + filters = ansible_dict_to_boto3_filter_list(filters) + + try: + images = ec2_client.describe_images(aws_retry=True, ImageIds=image_ids, Filters=filters, Owners=owner_param, + ExecutableUsers=executable_users) + images = [camel_dict_to_snake_dict(image) for image in images["Images"]] + except (ClientError, BotoCoreError) as err: + module.fail_json_aws(err, msg="error describing images") + for image in images: + try: + image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', [])) + if module.params.get("describe_image_attributes"): + launch_permissions = ec2_client.describe_image_attribute(aws_retry=True, Attribute='launchPermission', + ImageId=image['image_id'])['LaunchPermissions'] + image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions] + except is_boto3_error_code('AuthFailure'): + # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures + pass + except (ClientError, BotoCoreError) as err: + module.fail_json_aws(err, 'Failed to describe AMI') + + images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist + module.exit_json(images=images) + + +def main(): + + argument_spec = dict( + image_ids=dict(default=[], type='list', elements='str', aliases=['image_id']), + filters=dict(default={}, type='dict'), + owners=dict(default=[], type='list', elements='str', aliases=['owner']), + executable_users=dict(default=[], type='list', elements='str', aliases=['executable_user']), + describe_image_attributes=dict(default=False, type='bool') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._module._name == 'ec2_ami_facts': + module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'", date='2021-12-01', collection_name='amazon.aws') + + ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + list_ec2_images(ec2_client, module) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py new file mode 100644 index 00000000..f2b52556 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py @@ -0,0 +1,287 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_ami_info +version_added: 1.0.0 +short_description: Gather information about ec2 AMIs +description: + - Gather information about ec2 AMIs + - This module was called C(amazon.aws.ec2_ami_facts) before Ansible 2.9. The usage did not change. +author: + - Prasad Katti (@prasadkatti) +requirements: [ boto3 ] +options: + image_ids: + description: One or more image IDs. 
+ aliases: [image_id] + type: list + elements: str + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters. + - Filter names and values are case sensitive. + type: dict + owners: + description: + - Filter the images by the owner. Valid options are an AWS account ID, self, + or an AWS owner alias ( amazon | aws-marketplace | microsoft ). + aliases: [owner] + type: list + elements: str + executable_users: + description: + - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs). + aliases: [executable_user] + type: list + elements: str + describe_image_attributes: + description: + - Describe attributes (like launchPermission) of the images found. + default: no + type: bool + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: gather information about an AMI using ami-id + amazon.aws.ec2_ami_info: + image_ids: ami-5b488823 + +- name: gather information about all AMIs with tag key Name and value webapp + amazon.aws.ec2_ami_info: + filters: + "tag:Name": webapp + +- name: gather information about an AMI with 'AMI Name' equal to foobar + amazon.aws.ec2_ami_info: + filters: + name: foobar + +- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477) + amazon.aws.ec2_ami_info: + owners: 099720109477 + filters: + name: "ubuntu/images/ubuntu-zesty-17.04-*" +''' + +RETURN = ''' +images: + description: A list of images. + returned: always + type: list + elements: dict + contains: + architecture: + description: The architecture of the image. + returned: always + type: str + sample: x86_64 + block_device_mappings: + description: Any block device mapping entries. + returned: always + type: list + elements: dict + contains: + device_name: + description: The device name exposed to the instance. + returned: always + type: str + sample: /dev/sda1 + ebs: + description: EBS volumes + returned: always + type: complex + creation_date: + description: The date and time the image was created. + returned: always + type: str + sample: '2017-10-16T19:22:13.000Z' + description: + description: The description of the AMI. + returned: always + type: str + sample: '' + ena_support: + description: Whether enhanced networking with ENA is enabled. + returned: always + type: bool + sample: true + hypervisor: + description: The hypervisor type of the image. + returned: always + type: str + sample: xen + image_id: + description: The ID of the AMI. + returned: always + type: str + sample: ami-5b466623 + image_location: + description: The location of the AMI. + returned: always + type: str + sample: 408466080000/Webapp + image_type: + description: The type of image. + returned: always + type: str + sample: machine + launch_permissions: + description: A List of AWS accounts may launch the AMI. + returned: When image is owned by calling account and I(describe_image_attributes) is yes. + type: list + elements: dict + contains: + group: + description: A value of 'all' means the AMI is public. + type: str + user_id: + description: An AWS account ID with permissions to launch the AMI. 
+ type: str + sample: [{"group": "all"}, {"user_id": "408466080000"}] + name: + description: The name of the AMI that was provided during image creation. + returned: always + type: str + sample: Webapp + owner_id: + description: The AWS account ID of the image owner. + returned: always + type: str + sample: '408466080000' + public: + description: Whether the image has public launch permissions. + returned: always + type: bool + sample: true + root_device_name: + description: The device name of the root device. + returned: always + type: str + sample: /dev/sda1 + root_device_type: + description: The type of root device used by the AMI. + returned: always + type: str + sample: ebs + sriov_net_support: + description: Whether enhanced networking is enabled. + returned: always + type: str + sample: simple + state: + description: The current state of the AMI. + returned: always + type: str + sample: available + tags: + description: Any tags assigned to the image. + returned: always + type: dict + virtualization_type: + description: The type of virtualization of the AMI. + returned: always + type: str + sample: hvm +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.core import is_boto3_error_code +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def list_ec2_images(ec2_client, module): + + image_ids = module.params.get("image_ids") + owners = module.params.get("owners") + executable_users = module.params.get("executable_users") + filters = module.params.get("filters") + owner_param = [] + + # describe_images is *very* slow if you pass the `Owners` + # param (unless it's self), for some reason. + # Converting the owners to filters and removing from the + # owners param greatly speeds things up. 
+ # Implementation based on aioue's suggestion in #24886 + for owner in owners: + if owner.isdigit(): + if 'owner-id' not in filters: + filters['owner-id'] = list() + filters['owner-id'].append(owner) + elif owner == 'self': + # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) + owner_param.append(owner) + else: + if 'owner-alias' not in filters: + filters['owner-alias'] = list() + filters['owner-alias'].append(owner) + + filters = ansible_dict_to_boto3_filter_list(filters) + + try: + images = ec2_client.describe_images(aws_retry=True, ImageIds=image_ids, Filters=filters, Owners=owner_param, + ExecutableUsers=executable_users) + images = [camel_dict_to_snake_dict(image) for image in images["Images"]] + except (ClientError, BotoCoreError) as err: + module.fail_json_aws(err, msg="error describing images") + for image in images: + try: + image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', [])) + if module.params.get("describe_image_attributes"): + launch_permissions = ec2_client.describe_image_attribute(aws_retry=True, Attribute='launchPermission', + ImageId=image['image_id'])['LaunchPermissions'] + image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions] + except is_boto3_error_code('AuthFailure'): + # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures + pass + except (ClientError, BotoCoreError) as err: + module.fail_json_aws(err, 'Failed to describe AMI') + + images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist + module.exit_json(images=images) + + +def main(): + + argument_spec = dict( + image_ids=dict(default=[], type='list', elements='str', aliases=['image_id']), + filters=dict(default={}, type='dict'), + owners=dict(default=[], type='list', elements='str', aliases=['owner']), + executable_users=dict(default=[], type='list', elements='str', aliases=['executable_user']), + describe_image_attributes=dict(default=False, type='bool') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._module._name == 'ec2_ami_facts': + module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'", date='2021-12-01', collection_name='amazon.aws') + + ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + list_ec2_images(ec2_client, module) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_elb_lb.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_elb_lb.py new file mode 100644 index 00000000..a1e732e4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_elb_lb.py @@ -0,0 +1,1338 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_elb_lb +version_added: 1.0.0 +description: + - Returns information about the load balancer. + - Will be marked changed when called only if state is changed. +short_description: Creates, updates or destroys an Amazon ELB. +author: + - "Jim Dalton (@jsdalton)" +options: + state: + description: + - Create or destroy the ELB. 
+ type: str + choices: [ absent, present ] + required: true + name: + description: + - The name of the ELB. + type: str + required: true + listeners: + description: + - List of ports/protocols for this ELB to listen on (see examples). + type: list + elements: dict + purge_listeners: + description: + - Purge existing listeners on ELB that are not found in listeners. + type: bool + default: yes + instance_ids: + description: + - List of instance ids to attach to this ELB. + type: list + elements: str + purge_instance_ids: + description: + - Purge existing instance ids on ELB that are not found in instance_ids. + type: bool + default: no + zones: + description: + - List of availability zones to enable on this ELB. + type: list + elements: str + purge_zones: + description: + - Purge existing availability zones on ELB that are not found in zones. + type: bool + default: no + security_group_ids: + description: + - A list of security groups to apply to the ELB. + type: list + elements: str + security_group_names: + description: + - A list of security group names to apply to the ELB. + type: list + elements: str + health_check: + description: + - An associative array of health check configuration settings (see examples). + type: dict + access_logs: + description: + - An associative array of access logs configuration settings (see examples). + type: dict + subnets: + description: + - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. + type: list + elements: str + purge_subnets: + description: + - Purge existing subnet on ELB that are not found in subnets. + type: bool + default: no + scheme: + description: + - The scheme to use when creating the ELB. For a private VPC-visible ELB use C(internal). + - If you choose to update your scheme with a different value the ELB will be destroyed and + recreated. To update scheme you must use the option I(wait). + type: str + choices: ["internal", "internet-facing"] + default: 'internet-facing' + validate_certs: + description: + - When set to C(no), SSL certificates will not be validated for boto versions >= 2.6.0. + type: bool + default: yes + connection_draining_timeout: + description: + - Wait a specified timeout allowing connections to drain before terminating an instance. + type: int + idle_timeout: + description: + - ELB connections from clients and to servers are timed out after this amount of time. + type: int + cross_az_load_balancing: + description: + - Distribute load across all configured Availability Zones. + - Defaults to C(false). + type: bool + stickiness: + description: + - An associative array of stickiness policy settings. Policy will be applied to all listeners (see examples). + type: dict + wait: + description: + - When specified, Ansible will check the status of the load balancer to ensure it has been successfully + removed from AWS. + type: bool + default: no + wait_timeout: + description: + - Used in conjunction with wait. Number of seconds to wait for the ELB to be terminated. + - A maximum of 600 seconds (10 minutes) is allowed. + type: int + default: 60 + tags: + description: + - An associative array of tags. To delete all tags, supply an empty dict (C({})). + type: dict + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = """ +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. 
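+
+# Change the scheme of an existing ELB (illustrative placeholder values).
+# Changing scheme destroys and recreates the ELB, so wait is required
+# (see the scheme option above); basic provisioning examples follow below.
+- amazon.aws.ec2_elb_lb:
+    name: "test-please-delete"
+    state: present
+    scheme: internal
+    wait: yes
+    zones:
+      - us-east-1a
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80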
+ +# Basic provisioning example (non-VPC) + +- amazon.aws.ec2_elb_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http # options are http, https, ssl, tcp + load_balancer_port: 80 + instance_port: 80 + proxy_protocol: True + - protocol: https + load_balancer_port: 443 + instance_protocol: http # optional, defaults to value of protocol setting + instance_port: 80 + # ssl certificate required for https or ssl + ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" + +# Internal ELB example + +- amazon.aws.ec2_elb_lb: + name: "test-vpc" + scheme: internal + state: present + instance_ids: + - i-abcd1234 + purge_instance_ids: true + subnets: + - subnet-abcd1234 + - subnet-1a2b3c4d + listeners: + - protocol: http # options are http, https, ssl, tcp + load_balancer_port: 80 + instance_port: 80 + +# Configure a health check and the access logs +- amazon.aws.ec2_elb_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + health_check: + ping_protocol: http # options are http, https, ssl, tcp + ping_port: 80 + ping_path: "/index.html" # not required for tcp or ssl + response_timeout: 5 # seconds + interval: 30 # seconds + unhealthy_threshold: 2 + healthy_threshold: 10 + access_logs: + interval: 5 # minutes (defaults to 60) + s3_location: "my-bucket" # This value is required if access_logs is set + s3_prefix: "logs" + +# Ensure ELB is gone +- amazon.aws.ec2_elb_lb: + name: "test-please-delete" + state: absent + +# Ensure ELB is gone and wait for check (for default timeout) +- amazon.aws.ec2_elb_lb: + name: "test-please-delete" + state: absent + wait: yes + +# Ensure ELB is gone and wait for check with timeout value +- amazon.aws.ec2_elb_lb: + name: "test-please-delete" + state: absent + wait: yes + wait_timeout: 600 + +# Normally, this module will purge any listeners that exist on the ELB +# but aren't specified in the listeners parameter. If purge_listeners is +# false it leaves them alone +- amazon.aws.ec2_elb_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + purge_listeners: no + +# Normally, this module will leave availability zones that are enabled +# on the ELB alone. If purge_zones is true, then any extraneous zones +# will be removed +- amazon.aws.ec2_elb_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + purge_zones: yes + +# Creates a ELB and assigns a list of subnets to it. 
+- amazon.aws.ec2_elb_lb: + state: present + name: 'New ELB' + security_group_ids: 'sg-123456, sg-67890' + region: us-west-2 + subnets: 'subnet-123456,subnet-67890' + purge_subnets: yes + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + +# Create an ELB with connection draining, increased idle timeout and cross availability +# zone load balancing +- amazon.aws.ec2_elb_lb: + name: "New ELB" + state: present + connection_draining_timeout: 60 + idle_timeout: 300 + cross_az_load_balancing: "yes" + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + +# Create an ELB with load balancer stickiness enabled +- amazon.aws.ec2_elb_lb: + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + stickiness: + type: loadbalancer + enabled: yes + expiration: 300 + +# Create an ELB with application stickiness enabled +- amazon.aws.ec2_elb_lb: + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + stickiness: + type: application + enabled: yes + cookie: SESSIONID + +# Create an ELB and add tags +- amazon.aws.ec2_elb_lb: + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + tags: + Name: "New ELB" + stack: "production" + client: "Bob" + +# Delete all tags from an ELB +- amazon.aws.ec2_elb_lb: + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + tags: {} +""" + +import random +import time + +try: + import boto + import boto.ec2.elb + import boto.ec2.elb.attributes + import boto.vpc + from boto.ec2.elb.healthcheck import HealthCheck + from boto.ec2.tag import Tag +except ImportError: + pass # Taken care of by ec2.HAS_BOTO + +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AnsibleAWSError +from ..module_utils.ec2 import HAS_BOTO +from ..module_utils.ec2 import connect_to_aws +from ..module_utils.ec2 import get_aws_connection_info + + +def _throttleable_operation(max_retries): + def _operation_wrapper(op): + def _do_op(*args, **kwargs): + retry = 0 + while True: + try: + return op(*args, **kwargs) + except boto.exception.BotoServerError as e: + if retry < max_retries and e.code in \ + ("Throttling", "RequestLimitExceeded"): + retry = retry + 1 + time.sleep(min(random.random() * (2 ** retry), 300)) + continue + else: + raise + return _do_op + return _operation_wrapper + + +def _get_vpc_connection(module, region, aws_connect_params): + try: + return connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json_aws(e, 'Failed to connect to AWS') + + +_THROTTLING_RETRIES = 5 + + +class ElbManager(object): + """Handles ELB creation and destruction""" + + def __init__(self, module, name, listeners=None, purge_listeners=None, + zones=None, purge_zones=None, security_group_ids=None, + health_check=None, subnets=None, purge_subnets=None, + scheme="internet-facing", connection_draining_timeout=None, + idle_timeout=None, + 
cross_az_load_balancing=None, access_logs=None, + stickiness=None, wait=None, wait_timeout=None, tags=None, + region=None, + instance_ids=None, purge_instance_ids=None, **aws_connect_params): + + self.module = module + self.name = name + self.listeners = listeners + self.purge_listeners = purge_listeners + self.instance_ids = instance_ids + self.purge_instance_ids = purge_instance_ids + self.zones = zones + self.purge_zones = purge_zones + self.security_group_ids = security_group_ids + self.health_check = health_check + self.subnets = subnets + self.purge_subnets = purge_subnets + self.scheme = scheme + self.connection_draining_timeout = connection_draining_timeout + self.idle_timeout = idle_timeout + self.cross_az_load_balancing = cross_az_load_balancing + self.access_logs = access_logs + self.stickiness = stickiness + self.wait = wait + self.wait_timeout = wait_timeout + self.tags = tags + + self.aws_connect_params = aws_connect_params + self.region = region + + self.changed = False + self.status = 'gone' + self.elb_conn = self._get_elb_connection() + + try: + self.elb = self._get_elb() + except boto.exception.BotoServerError as e: + module.fail_json_aws(e, msg='Unable to get all load balancers') + + self.ec2_conn = self._get_ec2_connection() + + @_throttleable_operation(_THROTTLING_RETRIES) + def ensure_ok(self): + """Create the ELB""" + if not self.elb: + # Zones and listeners will be added at creation + self._create_elb() + else: + if self._get_scheme(): + # the only way to change the scheme is by recreating the resource + self.ensure_gone() + self._create_elb() + else: + self._set_zones() + self._set_security_groups() + self._set_elb_listeners() + self._set_subnets() + self._set_health_check() + # boto has introduced support for some ELB attributes in + # different versions, so we check first before trying to + # set them to avoid errors + if self._check_attribute_support('connection_draining'): + self._set_connection_draining_timeout() + if self._check_attribute_support('connecting_settings'): + self._set_idle_timeout() + if self._check_attribute_support('cross_zone_load_balancing'): + self._set_cross_az_load_balancing() + if self._check_attribute_support('access_log'): + self._set_access_log() + # add sticky options + self.select_stickiness_policy() + + # ensure backend server policies are correct + self._set_backend_policies() + # set/remove instance ids + self._set_instance_ids() + + self._set_tags() + + def ensure_gone(self): + """Destroy the ELB""" + if self.elb: + self._delete_elb() + if self.wait: + elb_removed = self._wait_for_elb_removed() + # Unfortunately even though the ELB itself is removed quickly + # the interfaces take longer so reliant security groups cannot + # be deleted until the interface has registered as removed. 
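+                # _wait_for_elb_interface_removed() below polls
+                # get_all_network_interfaces() for interfaces described as
+                # 'ELB <name>' every 15 seconds until they disappear or
+                # wait_timeout is exhausted.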
+ elb_interface_removed = self._wait_for_elb_interface_removed() + if not (elb_removed and elb_interface_removed): + self.module.fail_json(msg='Timed out waiting for removal of load balancer.') + + def get_info(self): + try: + check_elb = self.elb_conn.get_all_load_balancers(self.name)[0] + except Exception: + check_elb = None + + if not check_elb: + info = { + 'name': self.name, + 'status': self.status, + 'region': self.region + } + else: + try: + lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name'] + except Exception: + lb_cookie_policy = None + try: + app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name'] + except Exception: + app_cookie_policy = None + + info = { + 'name': check_elb.name, + 'dns_name': check_elb.dns_name, + 'zones': check_elb.availability_zones, + 'security_group_ids': check_elb.security_groups, + 'status': self.status, + 'subnets': self.subnets, + 'scheme': check_elb.scheme, + 'hosted_zone_name': check_elb.canonical_hosted_zone_name, + 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id, + 'lb_cookie_policy': lb_cookie_policy, + 'app_cookie_policy': app_cookie_policy, + 'proxy_policy': self._get_proxy_protocol_policy(), + 'backends': self._get_backend_policies(), + 'instances': [instance.id for instance in check_elb.instances], + 'out_of_service_count': 0, + 'in_service_count': 0, + 'unknown_instance_state_count': 0, + 'region': self.region + } + + # status of instances behind the ELB + if info['instances']: + info['instance_health'] = [dict( + instance_id=instance_state.instance_id, + reason_code=instance_state.reason_code, + state=instance_state.state + ) for instance_state in self.elb_conn.describe_instance_health(self.name)] + else: + info['instance_health'] = [] + + # instance state counts: InService or OutOfService + if info['instance_health']: + for instance_state in info['instance_health']: + if instance_state['state'] == "InService": + info['in_service_count'] += 1 + elif instance_state['state'] == "OutOfService": + info['out_of_service_count'] += 1 + else: + info['unknown_instance_state_count'] += 1 + + if check_elb.health_check: + info['health_check'] = { + 'target': check_elb.health_check.target, + 'interval': check_elb.health_check.interval, + 'timeout': check_elb.health_check.timeout, + 'healthy_threshold': check_elb.health_check.healthy_threshold, + 'unhealthy_threshold': check_elb.health_check.unhealthy_threshold, + } + + if check_elb.listeners: + info['listeners'] = [self._api_listener_as_tuple(l) + for l in check_elb.listeners] + elif self.status == 'created': + # When creating a new ELB, listeners don't show in the + # immediately returned result, so just include the + # ones that were added + info['listeners'] = [self._listener_as_tuple(l) + for l in self.listeners] + else: + info['listeners'] = [] + + if self._check_attribute_support('connection_draining'): + info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout) + + if self._check_attribute_support('connecting_settings'): + info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout + + if self._check_attribute_support('cross_zone_load_balancing'): + is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing') + if is_cross_az_lb_enabled: + info['cross_az_load_balancing'] = 'yes' + else: + info['cross_az_load_balancing'] = 'no' + + # return stickiness info? 
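+
+        # Note: the 'tags' value reported below is the tags parameter passed
+        # to the module, not the tag set read back from the AWS API.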
+ + info['tags'] = self.tags + + return info + + @_throttleable_operation(_THROTTLING_RETRIES) + def _wait_for_elb_removed(self): + polling_increment_secs = 15 + max_retries = (self.wait_timeout // polling_increment_secs) + status_achieved = False + + for x in range(0, max_retries): + try: + self.elb_conn.get_all_lb_attributes(self.name) + except (boto.exception.BotoServerError, Exception) as e: + if "LoadBalancerNotFound" in e.code: + status_achieved = True + break + else: + time.sleep(polling_increment_secs) + + return status_achieved + + @_throttleable_operation(_THROTTLING_RETRIES) + def _wait_for_elb_interface_removed(self): + polling_increment_secs = 15 + max_retries = (self.wait_timeout // polling_increment_secs) + status_achieved = False + + elb_interfaces = self.ec2_conn.get_all_network_interfaces( + filters={'attachment.instance-owner-id': 'amazon-elb', + 'description': 'ELB {0}'.format(self.name)}) + + for x in range(0, max_retries): + for interface in elb_interfaces: + try: + result = self.ec2_conn.get_all_network_interfaces(interface.id) + if result == []: + status_achieved = True + break + else: + time.sleep(polling_increment_secs) + except (boto.exception.BotoServerError, Exception) as e: + if 'InvalidNetworkInterfaceID' in e.code: + status_achieved = True + break + else: + self.module.fail_json_aws(e, 'Failure while waiting for interface to be removed') + + return status_achieved + + @_throttleable_operation(_THROTTLING_RETRIES) + def _get_elb(self): + elbs = self.elb_conn.get_all_load_balancers() + for elb in elbs: + if self.name == elb.name: + self.status = 'ok' + return elb + + def _get_elb_connection(self): + try: + return connect_to_aws(boto.ec2.elb, self.region, + **self.aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + self.module.fail_json_aws(e, 'Failure while connecting to AWS') + + def _get_ec2_connection(self): + try: + return connect_to_aws(boto.ec2, self.region, + **self.aws_connect_params) + except (boto.exception.NoAuthHandlerFound, Exception) as e: + self.module.fail_json_aws(e, 'Failure while connecting to AWS') + + @_throttleable_operation(_THROTTLING_RETRIES) + def _delete_elb(self): + # True if succeeds, exception raised if not + result = self.elb_conn.delete_load_balancer(name=self.name) + if result: + self.changed = True + self.status = 'deleted' + + def _create_elb(self): + listeners = [self._listener_as_tuple(l) for l in self.listeners] + self.elb = self.elb_conn.create_load_balancer(name=self.name, + zones=self.zones, + security_groups=self.security_group_ids, + complex_listeners=listeners, + subnets=self.subnets, + scheme=self.scheme) + if self.elb: + # HACK: Work around a boto bug in which the listeners attribute is + # always set to the listeners argument to create_load_balancer, and + # not the complex_listeners + # We're not doing a self.elb = self._get_elb here because there + # might be eventual consistency issues and it doesn't necessarily + # make sense to wait until the ELB gets returned from the EC2 API. 
+            # This is necessary in the event we hit the throttling errors and
+            # need to retry ensure_ok
+            # See https://github.com/boto/boto/issues/3526
+            self.elb.listeners = self.listeners
+            self.changed = True
+            self.status = 'created'
+
+    def _create_elb_listeners(self, listeners):
+        """Takes a list of listener tuples and creates them"""
+        # True if succeeds, exception raised if not
+        self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
+                                                                    complex_listeners=listeners)
+
+    def _delete_elb_listeners(self, listeners):
+        """Takes a list of listener tuples and deletes them from the elb"""
+        ports = [l[0] for l in listeners]
+
+        # True if succeeds, exception raised if not
+        self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
+                                                                    ports)
+
+    def _set_elb_listeners(self):
+        """
+        Creates listeners specified by self.listeners; overwrites existing
+        listeners on these ports; removes extraneous listeners
+        """
+        listeners_to_add = []
+        listeners_to_remove = []
+        listeners_to_keep = []
+
+        # Check for any listeners we need to create or overwrite
+        for listener in self.listeners:
+            listener_as_tuple = self._listener_as_tuple(listener)
+
+            # First we loop through existing listeners to see if one is
+            # already specified for this port
+            existing_listener_found = None
+            for existing_listener in self.elb.listeners:
+                # Since ELB allows only one listener on each incoming port, a
+                # single match on the incoming port is all we're looking for
+                if existing_listener[0] == int(listener['load_balancer_port']):
+                    existing_listener_found = self._api_listener_as_tuple(existing_listener)
+                    break
+
+            if existing_listener_found:
+                # Does it match exactly?
+                if listener_as_tuple != existing_listener_found:
+                    # The ports are the same but something else is different,
+                    # so we'll remove the existing one and add the new one
+                    listeners_to_remove.append(existing_listener_found)
+                    listeners_to_add.append(listener_as_tuple)
+                else:
+                    # We already have this listener, so we're going to keep it
+                    listeners_to_keep.append(existing_listener_found)
+            else:
+                # We didn't find an existing listener, so just add the new one
+                listeners_to_add.append(listener_as_tuple)
+
+        # Check for any extraneous listeners we need to remove, if desired
+        if self.purge_listeners:
+            for existing_listener in self.elb.listeners:
+                existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
+                if existing_listener_tuple in listeners_to_remove:
+                    # Already queued for removal
+                    continue
+                if existing_listener_tuple in listeners_to_keep:
+                    # Keep this one around
+                    continue
+                # Since we're not already removing it and we don't need to keep
+                # it, let's get rid of it
+                listeners_to_remove.append(existing_listener_tuple)
+
+        if listeners_to_remove:
+            self._delete_elb_listeners(listeners_to_remove)
+
+        if listeners_to_add:
+            self._create_elb_listeners(listeners_to_add)
+
+    def _api_listener_as_tuple(self, listener):
+        """Adds ssl_certificate_id to ELB API tuple if present"""
+        base_tuple = listener.get_complex_tuple()
+        if listener.ssl_certificate_id and len(base_tuple) < 5:
+            return base_tuple + (listener.ssl_certificate_id,)
+        return base_tuple
+
+    def _listener_as_tuple(self, listener):
+        """Formats a listener as a 4- or 5-tuple, in the order specified by the
+        ELB API"""
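+        # For example (illustrative values only), a listener dict such as
+        #   {'load_balancer_port': 443, 'instance_port': 8443, 'protocol': 'https',
+        #    'instance_protocol': 'http', 'ssl_certificate_id': 'arn:aws:iam::123456789012:server-certificate/example'}
+        # is rendered as
+        #   (443, 8443, 'HTTPS', 'HTTP', 'arn:aws:iam::123456789012:server-certificate/example')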
+        # N.B. string manipulations on protocols below (str(), upper()) are
+        # there to ensure the format matches output from the ELB API
+        listener_list = [
+            int(listener['load_balancer_port']),
+            int(listener['instance_port']),
+            str(listener['protocol'].upper()),
+        ]
+
+        # Instance protocol is not required by ELB API; it defaults to match
+        # load balancer protocol. We'll mimic that behavior here
+        if 'instance_protocol' in listener:
+            listener_list.append(str(listener['instance_protocol'].upper()))
+        else:
+            listener_list.append(str(listener['protocol'].upper()))
+
+        if 'ssl_certificate_id' in listener:
+            listener_list.append(str(listener['ssl_certificate_id']))
+
+        return tuple(listener_list)
+
+    def _enable_zones(self, zones):
+        try:
+            self.elb.enable_zones(zones)
+        except boto.exception.BotoServerError as e:
+            self.module.fail_json_aws(e, msg='unable to enable zones')
+
+        self.changed = True
+
+    def _disable_zones(self, zones):
+        try:
+            self.elb.disable_zones(zones)
+        except boto.exception.BotoServerError as e:
+            self.module.fail_json_aws(e, msg='unable to disable zones')
+        self.changed = True
+
+    def _attach_subnets(self, subnets):
+        self.elb_conn.attach_lb_to_subnets(self.name, subnets)
+        self.changed = True
+
+    def _detach_subnets(self, subnets):
+        self.elb_conn.detach_lb_from_subnets(self.name, subnets)
+        self.changed = True
+
+    def _set_subnets(self):
+        """Determine which subnets need to be attached or detached on the ELB"""
+        if self.subnets:
+            if self.purge_subnets:
+                subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
+                subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
+            else:
+                subnets_to_detach = None
+                subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
+
+            if subnets_to_attach:
+                self._attach_subnets(subnets_to_attach)
+            if subnets_to_detach:
+                self._detach_subnets(subnets_to_detach)
+
+    def _get_scheme(self):
+        """Determine if the current scheme is different from the scheme of the ELB"""
+        if self.scheme:
+            if self.elb.scheme != self.scheme:
+                if not self.wait:
+                    self.module.fail_json(msg="Unable to modify scheme without using the wait option")
+                return True
+        return False
+
+    def _set_zones(self):
+        """Determine which zones need to be enabled or disabled on the ELB"""
+        if self.zones:
+            if self.purge_zones:
+                zones_to_disable = list(set(self.elb.availability_zones) -
+                                        set(self.zones))
+                zones_to_enable = list(set(self.zones) -
+                                       set(self.elb.availability_zones))
+            else:
+                zones_to_disable = None
+                zones_to_enable = list(set(self.zones) -
+                                       set(self.elb.availability_zones))
+            if zones_to_enable:
+                self._enable_zones(zones_to_enable)
+            # N.B. This must come second, in case it would have removed all zones
+            if zones_to_disable:
+                self._disable_zones(zones_to_disable)
+
+    def _set_security_groups(self):
+        if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
+            self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
+            self.changed = True
+
+    def _set_health_check(self):
+        """Set health check values on ELB as needed"""
+        if self.health_check:
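+            # For example (illustrative values), health_check={'ping_protocol': 'http',
+            # 'ping_port': 80, 'ping_path': '/index.html', 'response_timeout': 5,
+            # 'interval': 30, 'unhealthy_threshold': 2, 'healthy_threshold': 10}
+            # maps to a target of 'HTTP:80/index.html' via _get_health_check_target().
+            # This just makes it easier to compare each of the attributes
+            # and look for changes. 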
Keys are attributes of the current + # health_check; values are desired values of new health_check + health_check_config = { + "target": self._get_health_check_target(), + "timeout": self.health_check['response_timeout'], + "interval": self.health_check['interval'], + "unhealthy_threshold": self.health_check['unhealthy_threshold'], + "healthy_threshold": self.health_check['healthy_threshold'], + } + + update_health_check = False + + # The health_check attribute is *not* set on newly created + # ELBs! So we have to create our own. + if not self.elb.health_check: + self.elb.health_check = HealthCheck() + + for attr, desired_value in health_check_config.items(): + if getattr(self.elb.health_check, attr) != desired_value: + setattr(self.elb.health_check, attr, desired_value) + update_health_check = True + + if update_health_check: + self.elb.configure_health_check(self.elb.health_check) + self.changed = True + + def _check_attribute_support(self, attr): + return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr) + + def _set_cross_az_load_balancing(self): + attributes = self.elb.get_attributes() + if self.cross_az_load_balancing: + if not attributes.cross_zone_load_balancing.enabled: + self.changed = True + attributes.cross_zone_load_balancing.enabled = True + else: + if attributes.cross_zone_load_balancing.enabled: + self.changed = True + attributes.cross_zone_load_balancing.enabled = False + self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing', + attributes.cross_zone_load_balancing.enabled) + + def _set_access_log(self): + attributes = self.elb.get_attributes() + if self.access_logs: + if 's3_location' not in self.access_logs: + self.module.fail_json(msg='s3_location information required') + + access_logs_config = { + "enabled": True, + "s3_bucket_name": self.access_logs['s3_location'], + "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''), + "emit_interval": self.access_logs.get('interval', 60), + } + + update_access_logs_config = False + for attr, desired_value in access_logs_config.items(): + if getattr(attributes.access_log, attr) != desired_value: + setattr(attributes.access_log, attr, desired_value) + update_access_logs_config = True + if update_access_logs_config: + self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log) + self.changed = True + elif attributes.access_log.enabled: + attributes.access_log.enabled = False + self.changed = True + self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log) + + def _set_connection_draining_timeout(self): + attributes = self.elb.get_attributes() + if self.connection_draining_timeout is not None: + if not attributes.connection_draining.enabled or \ + attributes.connection_draining.timeout != self.connection_draining_timeout: + self.changed = True + attributes.connection_draining.enabled = True + attributes.connection_draining.timeout = self.connection_draining_timeout + self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) + else: + if attributes.connection_draining.enabled: + self.changed = True + attributes.connection_draining.enabled = False + self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) + + def _set_idle_timeout(self): + attributes = self.elb.get_attributes() + if self.idle_timeout is not None: + if attributes.connecting_settings.idle_timeout != self.idle_timeout: + self.changed = True + attributes.connecting_settings.idle_timeout = self.idle_timeout + 
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings) + + def _policy_name(self, policy_type): + return 'ec2-elb-lb-{0}'.format(to_native(policy_type, errors='surrogate_or_strict')) + + def _create_policy(self, policy_param, policy_meth, policy): + getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy) + + def _delete_policy(self, elb_name, policy): + self.elb_conn.delete_lb_policy(elb_name, policy) + + def _update_policy(self, policy_param, policy_meth, policy_attr, policy): + self._delete_policy(self.elb.name, policy) + self._create_policy(policy_param, policy_meth, policy) + + def _set_listener_policy(self, listeners_dict, policy=None): + policy = [] if policy is None else policy + + for listener_port in listeners_dict: + if listeners_dict[listener_port].startswith('HTTP'): + self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy) + + def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs): + for p in getattr(elb_info.policies, policy_attrs['attr']): + if str(p.__dict__['policy_name']) == str(policy[0]): + if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0): + self._set_listener_policy(listeners_dict) + self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0]) + self.changed = True + break + else: + self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0]) + self.changed = True + + self._set_listener_policy(listeners_dict, policy) + + def select_stickiness_policy(self): + if self.stickiness: + + if 'cookie' in self.stickiness and 'expiration' in self.stickiness: + self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time') + + elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0] + d = {} + for listener in elb_info.listeners: + d[listener[0]] = listener[2] + listeners_dict = d + + if self.stickiness['type'] == 'loadbalancer': + policy = [] + policy_type = 'LBCookieStickinessPolicyType' + + if self.module.boolean(self.stickiness['enabled']): + + if 'expiration' not in self.stickiness: + self.module.fail_json(msg='expiration must be set when type is loadbalancer') + + try: + expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None + except ValueError: + self.module.fail_json(msg='expiration must be set to an integer') + + policy_attrs = { + 'type': policy_type, + 'attr': 'lb_cookie_stickiness_policies', + 'method': 'create_lb_cookie_stickiness_policy', + 'dict_key': 'cookie_expiration_period', + 'param_value': expiration + } + policy.append(self._policy_name(policy_attrs['type'])) + + self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs) + elif not self.module.boolean(self.stickiness['enabled']): + if len(elb_info.policies.lb_cookie_stickiness_policies): + if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type): + self.changed = True + else: + self.changed = False + self._set_listener_policy(listeners_dict) + self._delete_policy(self.elb.name, self._policy_name(policy_type)) + + elif self.stickiness['type'] == 'application': + policy = [] + policy_type = 'AppCookieStickinessPolicyType' + if self.module.boolean(self.stickiness['enabled']): + + if 'cookie' not in self.stickiness: + self.module.fail_json(msg='cookie must be set when type is application') + + policy_attrs = { + 'type': policy_type, + 'attr': 
'app_cookie_stickiness_policies', + 'method': 'create_app_cookie_stickiness_policy', + 'dict_key': 'cookie_name', + 'param_value': self.stickiness['cookie'] + } + policy.append(self._policy_name(policy_attrs['type'])) + self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs) + elif not self.module.boolean(self.stickiness['enabled']): + if len(elb_info.policies.app_cookie_stickiness_policies): + if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type): + self.changed = True + self._set_listener_policy(listeners_dict) + self._delete_policy(self.elb.name, self._policy_name(policy_type)) + + else: + self._set_listener_policy(listeners_dict) + + def _get_backend_policies(self): + """Get a list of backend policies""" + policies = [] + if self.elb.backends is not None: + for backend in self.elb.backends: + if backend.policies is not None: + for policy in backend.policies: + policies.append(str(backend.instance_port) + ':' + policy.policy_name) + + return policies + + def _set_backend_policies(self): + """Sets policies for all backends""" + ensure_proxy_protocol = False + replace = [] + backend_policies = self._get_backend_policies() + + # Find out what needs to be changed + for listener in self.listeners: + want = False + + if 'proxy_protocol' in listener and listener['proxy_protocol']: + ensure_proxy_protocol = True + want = True + + if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies: + if not want: + replace.append({'port': listener['instance_port'], 'policies': []}) + elif want: + replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']}) + + # enable or disable proxy protocol + if ensure_proxy_protocol: + self._set_proxy_protocol_policy() + + # Make the backend policies so + for item in replace: + self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies']) + self.changed = True + + def _get_proxy_protocol_policy(self): + """Find out if the elb has a proxy protocol enabled""" + if self.elb.policies is not None and self.elb.policies.other_policies is not None: + for policy in self.elb.policies.other_policies: + if policy.policy_name == 'ProxyProtocol-policy': + return policy.policy_name + + return None + + def _set_proxy_protocol_policy(self): + """Install a proxy protocol policy if needed""" + proxy_policy = self._get_proxy_protocol_policy() + + if proxy_policy is None: + self.elb_conn.create_lb_policy( + self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True} + ) + self.changed = True + + # TODO: remove proxy protocol policy if not needed anymore? 
There is no side effect to leaving it there + + def _diff_list(self, a, b): + """Find the entries in list a that are not in list b""" + b = set(b) + return [aa for aa in a if aa not in b] + + def _get_instance_ids(self): + """Get the current list of instance ids installed in the elb""" + instances = [] + if self.elb.instances is not None: + for instance in self.elb.instances: + instances.append(instance.id) + + return instances + + def _set_instance_ids(self): + """Register or deregister instances from an lb instance""" + assert_instances = self.instance_ids or [] + + has_instances = self._get_instance_ids() + + add_instances = self._diff_list(assert_instances, has_instances) + if add_instances: + self.elb_conn.register_instances(self.elb.name, add_instances) + self.changed = True + + if self.purge_instance_ids: + remove_instances = self._diff_list(has_instances, assert_instances) + if remove_instances: + self.elb_conn.deregister_instances(self.elb.name, remove_instances) + self.changed = True + + def _set_tags(self): + """Add/Delete tags""" + if self.tags is None: + return + + params = {'LoadBalancerNames.member.1': self.name} + + tagdict = dict() + + # get the current list of tags from the ELB, if ELB exists + if self.elb: + current_tags = self.elb_conn.get_list('DescribeTags', params, + [('member', Tag)]) + tagdict = dict((tag.Key, tag.Value) for tag in current_tags + if hasattr(tag, 'Key')) + + # Add missing tags + dictact = dict(set(self.tags.items()) - set(tagdict.items())) + if dictact: + for i, key in enumerate(dictact): + params['Tags.member.%d.Key' % (i + 1)] = key + params['Tags.member.%d.Value' % (i + 1)] = dictact[key] + + self.elb_conn.make_request('AddTags', params) + self.changed = True + + # Remove extra tags + dictact = dict(set(tagdict.items()) - set(self.tags.items())) + if dictact: + for i, key in enumerate(dictact): + params['Tags.member.%d.Key' % (i + 1)] = key + + self.elb_conn.make_request('RemoveTags', params) + self.changed = True + + def _get_health_check_target(self): + """Compose target string from healthcheck parameters""" + protocol = self.health_check['ping_protocol'].upper() + path = "" + + if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check: + path = self.health_check['ping_path'] + + return "%s:%s%s" % (protocol, self.health_check['ping_port'], path) + + +def main(): + argument_spec = dict( + state={'required': True, 'choices': ['present', 'absent']}, + name={'required': True}, + listeners={'default': None, 'required': False, 'type': 'list', 'elements': 'dict'}, + purge_listeners={'default': True, 'required': False, 'type': 'bool'}, + instance_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, + purge_instance_ids={'default': False, 'required': False, 'type': 'bool'}, + zones={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, + purge_zones={'default': False, 'required': False, 'type': 'bool'}, + security_group_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, + security_group_names={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, + health_check={'default': None, 'required': False, 'type': 'dict'}, + subnets={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, + purge_subnets={'default': False, 'required': False, 'type': 'bool'}, + scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']}, + connection_draining_timeout={'default': None, 'required': False, 'type': 'int'}, + 
idle_timeout={'default': None, 'type': 'int', 'required': False}, + cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False}, + stickiness={'default': None, 'required': False, 'type': 'dict'}, + access_logs={'default': None, 'required': False, 'type': 'dict'}, + wait={'default': False, 'type': 'bool', 'required': False}, + wait_timeout={'default': 60, 'type': 'int', 'required': False}, + tags={'default': None, 'required': False, 'type': 'dict'} + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + check_boto3=False, + mutually_exclusive=[['security_group_ids', 'security_group_names']] + ) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") + + name = module.params['name'] + state = module.params['state'] + listeners = module.params['listeners'] + purge_listeners = module.params['purge_listeners'] + instance_ids = module.params['instance_ids'] + purge_instance_ids = module.params['purge_instance_ids'] + zones = module.params['zones'] + purge_zones = module.params['purge_zones'] + security_group_ids = module.params['security_group_ids'] + security_group_names = module.params['security_group_names'] + health_check = module.params['health_check'] + access_logs = module.params['access_logs'] + subnets = module.params['subnets'] + purge_subnets = module.params['purge_subnets'] + scheme = module.params['scheme'] + connection_draining_timeout = module.params['connection_draining_timeout'] + idle_timeout = module.params['idle_timeout'] + cross_az_load_balancing = module.params['cross_az_load_balancing'] + stickiness = module.params['stickiness'] + wait = module.params['wait'] + wait_timeout = module.params['wait_timeout'] + tags = module.params['tags'] + + if state == 'present' and not listeners: + module.fail_json(msg="At least one listener is required for ELB creation") + + if state == 'present' and not (zones or subnets): + module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") + + if wait_timeout > 600: + module.fail_json(msg='wait_timeout maximum is 600 seconds') + + if security_group_names: + security_group_ids = [] + try: + ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params) + if subnets: # We have at least one subnet, ergo this is a VPC + vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params) + vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id + filters = {'vpc_id': vpc_id} + else: + filters = None + grp_details = ec2.get_all_security_groups(filters=filters) + + for group_name in security_group_names: + if isinstance(group_name, string_types): + group_name = [group_name] + + group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name] + security_group_ids.extend(group_id) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json_aws(e) + + elb_man = ElbManager(module, name, listeners, purge_listeners, zones, + purge_zones, security_group_ids, health_check, + subnets, purge_subnets, scheme, + connection_draining_timeout, idle_timeout, + cross_az_load_balancing, + access_logs, stickiness, wait, wait_timeout, tags, + region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids, + **aws_connect_params) + + # check for unsupported attributes for this version 
of boto + if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'): + module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute") + + if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'): + module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute") + + if idle_timeout and not elb_man._check_attribute_support('connecting_settings'): + module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute") + + if state == 'present': + elb_man.ensure_ok() + elif state == 'absent': + elb_man.ensure_gone() + + ansible_facts = {'ec2_elb': 'info'} + ec2_facts_result = dict(changed=elb_man.changed, + elb=elb_man.get_info(), + ansible_facts=ansible_facts) + + module.exit_json(**ec2_facts_result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py new file mode 100644 index 00000000..01a81f99 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py @@ -0,0 +1,882 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_eni +version_added: 1.0.0 +short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance +description: + - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is + provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status + of the network interface. +author: + - "Rob White (@wimnat)" + - "Mike Healey (@healem)" +options: + eni_id: + description: + - The ID of the ENI (to modify). + - If I(eni_id=None) and I(state=present), a new eni will be created. + type: str + instance_id: + description: + - Instance ID that you wish to attach ENI to. + - Since version 2.2, use the I(attached) parameter to attach or detach an ENI. Prior to 2.2, to detach an ENI from an instance, use C(None). + type: str + private_ip_address: + description: + - Private IP address. + type: str + subnet_id: + description: + - ID of subnet in which to create the ENI. + type: str + description: + description: + - Optional description of the ENI. + type: str + security_groups: + description: + - List of security groups associated with the interface. Only used when I(state=present). + - Since version 2.2, you can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID. + type: list + elements: str + state: + description: + - Create or delete ENI. + default: present + choices: [ 'present', 'absent' ] + type: str + device_index: + description: + - The index of the device for the network interface attachment on the instance. + default: 0 + type: int + attached: + description: + - Specifies if network interface should be attached or detached from instance. If omitted, attachment status + won't change + type: bool + force_detach: + description: + - Force detachment of the interface. This applies either when explicitly detaching the interface by setting I(instance_id=None) + or when deleting an interface with I(state=absent). 
+    default: false
+    type: bool
+  delete_on_termination:
+    description:
+      - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the
+        interface is being modified, not on creation.
+    required: false
+    type: bool
+  source_dest_check:
+    description:
+      - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled.
+        You can only specify this flag when the interface is being modified, not on creation.
+    required: false
+    type: bool
+  secondary_private_ip_addresses:
+    description:
+      - A list of IP addresses to assign as secondary IP addresses to the network interface.
+        This option is mutually exclusive with I(secondary_private_ip_address_count).
+    required: false
+    type: list
+    elements: str
+  purge_secondary_private_ip_addresses:
+    description:
+      - To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.
+      - Set I(secondary_private_ip_addresses=[]) to purge all secondary addresses.
+    default: false
+    type: bool
+  secondary_private_ip_address_count:
+    description:
+      - The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive with I(secondary_private_ip_addresses).
+    required: false
+    type: int
+  allow_reassignment:
+    description:
+      - Indicates whether to allow an IP address that is already assigned to another network interface or instance
+        to be reassigned to the specified network interface.
+    required: false
+    default: false
+    type: bool
+  name:
+    description:
+      - Name for the ENI. This will create a tag called "Name" with the value assigned here.
+      - This can be used in conjunction with I(subnet_id) as another means of identifying a network interface.
+      - AWS does not enforce unique Name tags, so duplicate names are possible if you configure it that way.
+        If that is the case, you will need to provide other identifying information such as I(private_ip_address) or I(eni_id).
+    required: false
+    type: str
+  tags:
+    description:
+      - A hash/dictionary of tags to add to the new ENI or to add/remove from an existing one. Please note that
+        the name field sets the "Name" tag.
+      - To clear all tags, set this option to an empty dictionary to use in conjunction with I(purge_tags).
+        If you provide I(name), that tag will not be removed.
+      - To prevent removing any tags, set I(purge_tags) to false.
+    type: dict
+    required: false
+    version_added: 1.3.0
+  purge_tags:
+    description:
+      - Indicates whether to remove tags not specified in I(tags) or I(name). This means you have to specify all
+        the desired tags on each task affecting a network interface.
+      - If I(tags) is omitted or None this option is disregarded.
+    default: true
+    type: bool
+    version_added: 1.3.0
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+notes:
+    - This module identifies an ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id),
+      or a combination of I(instance_id) and I(device_index). Any of these options will let you specify a particular ENI.
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
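+
+# The IDs and addresses in these examples are illustrative placeholders.
+
+# Create a named, tagged ENI (the name option sets the "Name" tag)
+- amazon.aws.ec2_eni:
+    subnet_id: subnet-xxxxxxxx
+    name: my-eni-20
+    tags:
+      group: Finance
+    purge_tags: false
+    state: present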
+
+# Create an ENI. As no security group is defined, the ENI will be created in the default security group
+- amazon.aws.ec2_eni:
+    private_ip_address: 172.31.0.20
+    subnet_id: subnet-xxxxxxxx
+    state: present
+
+# Create an ENI and attach it to an instance
+- amazon.aws.ec2_eni:
+    instance_id: i-xxxxxxx
+    device_index: 1
+    private_ip_address: 172.31.0.20
+    subnet_id: subnet-xxxxxxxx
+    state: present
+
+# Create an ENI with two secondary addresses
+- amazon.aws.ec2_eni:
+    subnet_id: subnet-xxxxxxxx
+    state: present
+    secondary_private_ip_address_count: 2
+
+# Assign a secondary IP address to an existing ENI
+# This will purge any existing IPs
+- amazon.aws.ec2_eni:
+    subnet_id: subnet-xxxxxxxx
+    eni_id: eni-yyyyyyyy
+    state: present
+    secondary_private_ip_addresses:
+      - 172.16.1.1
+
+# Remove any secondary IP addresses from an existing ENI
+- amazon.aws.ec2_eni:
+    subnet_id: subnet-xxxxxxxx
+    eni_id: eni-yyyyyyyy
+    state: present
+    secondary_private_ip_address_count: 0
+
+# Destroy an ENI, detaching it from any instance if necessary
+- amazon.aws.ec2_eni:
+    eni_id: eni-xxxxxxx
+    force_detach: true
+    state: absent
+
+# Update an ENI
+- amazon.aws.ec2_eni:
+    eni_id: eni-xxxxxxx
+    description: "My new description"
+    state: present
+
+# Update an ENI using name and subnet_id
+- amazon.aws.ec2_eni:
+    name: eni-20
+    subnet_id: subnet-xxxxxxx
+    description: "My new description"
+    state: present
+
+# Update an ENI identifying it by private_ip_address and subnet_id
+- amazon.aws.ec2_eni:
+    subnet_id: subnet-xxxxxxx
+    private_ip_address: 172.16.1.1
+    description: "My new description"
+
+# Detach an ENI from an instance
+- amazon.aws.ec2_eni:
+    eni_id: eni-xxxxxxx
+    instance_id: None
+    state: present
+
+### Delete an interface on termination
+# First create the interface
+- amazon.aws.ec2_eni:
+    instance_id: i-xxxxxxx
+    device_index: 1
+    private_ip_address: 172.31.0.20
+    subnet_id: subnet-xxxxxxxx
+    state: present
+  register: eni
+
+# Modify the interface to enable the delete_on_termination flag
+- amazon.aws.ec2_eni:
+    eni_id: "{{ eni.interface.id }}"
+    delete_on_termination: true
+
+'''
+
+
+RETURN = '''
+interface:
+  description: Network interface attributes
+  returned: when state != absent
+  type: complex
+  contains:
+    description:
+      description: interface description
+      type: str
+      sample: Firewall network interface
+    groups:
+      description: list of security groups
+      type: list
+      elements: dict
+      sample: [ { "sg-f8a8a9da": "default" } ]
+    id:
+      description: network interface id
+      type: str
+      sample: "eni-1d889198"
+    mac_address:
+      description: interface's physical address
+      type: str
+      sample: "00:00:5E:00:53:23"
+    name:
+      description: The name of the ENI
+      type: str
+      sample: "my-eni-20"
+    owner_id:
+      description: aws account id
+      type: str
+      sample: 812381371
+    private_ip_address:
+      description: primary ip address of this interface
+      type: str
+      sample: 10.20.30.40
+    private_ip_addresses:
+      description: list of all private ip addresses associated with this interface
+      type: list
+      elements: dict
+      sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
+    source_dest_check:
+      description: value of source/dest check flag
+      type: bool
+      sample: True
+    status:
+      description: network interface status
+      type: str
+      sample: "pending"
+    subnet_id:
+      description: the VPC subnet the interface is bound to
+      type: str
+      sample: subnet-b0a0393c
+    tags:
+      description: The dictionary of tags associated with the ENI
+      type: dict
+      sample: { "Name": "my-eni", "group": "Finance" }
+    vpc_id:
+      description: the VPC this network interface is bound to
+      type: str
+      sample: vpc-9a9a9da
+
+'''
+
+import time
+
+try:
+    import botocore.exceptions
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import compare_aws_tags
+from ..module_utils.waiters import get_waiter
+
+
+def get_eni_info(interface):
+
+    # Private addresses
+    private_addresses = []
+    if "PrivateIpAddresses" in interface:
+        for ip in interface["PrivateIpAddresses"]:
+            private_addresses.append({'private_ip_address': ip["PrivateIpAddress"], 'primary_address': ip["Primary"]})
+
+    groups = {}
+    if "Groups" in interface:
+        for group in interface["Groups"]:
+            groups[group["GroupId"]] = group["GroupName"]
+
+    interface_info = {'id': interface.get("NetworkInterfaceId"),
+                      'subnet_id': interface.get("SubnetId"),
+                      'vpc_id': interface.get("VpcId"),
+                      'description': interface.get("Description"),
+                      'owner_id': interface.get("OwnerId"),
+                      'status': interface.get("Status"),
+                      'mac_address': interface.get("MacAddress"),
+                      'private_ip_address': interface.get("PrivateIpAddress"),
+                      'source_dest_check': interface.get("SourceDestCheck"),
+                      'groups': groups,
+                      'private_ip_addresses': private_addresses
+                      }
+
+    if "TagSet" in interface:
+        tags = {}
+        name = None
+        for tag in interface["TagSet"]:
+            tags[tag["Key"]] = tag["Value"]
+            if tag["Key"] == "Name":
+                name = tag["Value"]
+        interface_info["tags"] = tags
+
+        if name is not None:
+            interface_info["name"] = name
+
+    if "Attachment" in interface:
+        interface_info['attachment'] = {
+            'attachment_id': interface["Attachment"].get("AttachmentId"),
+            'instance_id': interface["Attachment"].get("InstanceId"),
+            'device_index': interface["Attachment"].get("DeviceIndex"),
+            'status': interface["Attachment"].get("Status"),
+            'attach_time': interface["Attachment"].get("AttachTime"),
+            'delete_on_termination': interface["Attachment"].get("DeleteOnTermination"),
+        }
+
+    return interface_info
+
+
+def correct_ips(connection, ip_list, module, eni_id):
+    """Check that every address in ip_list is assigned to the ENI"""
+    eni = describe_eni(connection, module, eni_id)
+    private_addresses = set()
+    if "PrivateIpAddresses" in eni:
+        for ip in eni["PrivateIpAddresses"]:
+            private_addresses.add(ip["PrivateIpAddress"])
+
+    ip_set = set(ip_list)
+
+    return ip_set.issubset(private_addresses)
+
+
+def absent_ips(connection, ip_list, module, eni_id):
+    """Check that no address in ip_list is still assigned to the ENI"""
+    eni = describe_eni(connection, module, eni_id)
+    private_addresses = set()
+    if "PrivateIpAddresses" in eni:
+        for ip in eni["PrivateIpAddresses"]:
+            private_addresses.add(ip["PrivateIpAddress"])
+
+    ip_set = set(ip_list)
+
+    # A union() check here could only succeed when both sets were empty; the
+    # addresses are absent when the two sets do not intersect.
+    return ip_set.isdisjoint(private_addresses)
+
+
+def correct_ip_count(connection, ip_count, module, eni_id):
+    eni = describe_eni(connection, module, eni_id)
+    private_addresses = set()
+    if "PrivateIpAddresses" in eni:
+        for ip in eni["PrivateIpAddresses"]:
+            private_addresses.add(ip["PrivateIpAddress"])
+
+    if len(private_addresses) == ip_count:
+        return True
+    else:
+        return False
+
+
+def wait_for(function_pointer, *args):
+    max_wait = 30
+    interval_time = 3
+    current_wait = 0
+    while current_wait < max_wait:
+        time.sleep(interval_time)
+        current_wait += interval_time
+        if function_pointer(*args):
+            break
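+# Usage sketch (illustrative): poll until the ENI reports exactly two private
+# addresses, checking every 3 seconds for at most 30 seconds:
+#     wait_for(correct_ip_count, connection, 2, module, eni_id)
+# Note that wait_for returns silently on timeout, so the callers below
+# re-describe the ENI afterwards rather than relying on the waiter's outcome.
+
+
+def 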
create_eni(connection, vpc_id, module): + + instance_id = module.params.get("instance_id") + attached = module.params.get("attached") + if instance_id == 'None': + instance_id = None + device_index = module.params.get("device_index") + subnet_id = module.params.get('subnet_id') + private_ip_address = module.params.get('private_ip_address') + description = module.params.get('description') + security_groups = get_ec2_security_group_ids_from_names( + module.params.get('security_groups'), + connection, + vpc_id=vpc_id, + boto3=True + ) + secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses") + secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count") + changed = False + tags = module.params.get("tags") + name = module.params.get("name") + purge_tags = module.params.get("purge_tags") + + try: + args = {"SubnetId": subnet_id} + if private_ip_address: + args["PrivateIpAddress"] = private_ip_address + if description: + args["Description"] = description + if len(security_groups) > 0: + args["Groups"] = security_groups + eni_dict = connection.create_network_interface(aws_retry=True, **args) + eni = eni_dict["NetworkInterface"] + # Once we have an ID make sure we're always modifying the same object + eni_id = eni["NetworkInterfaceId"] + get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id]) + + if attached and instance_id is not None: + try: + connection.attach_network_interface( + aws_retry=True, + InstanceId=instance_id, + DeviceIndex=device_index, + NetworkInterfaceId=eni["NetworkInterfaceId"] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): + connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) + raise + get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + + if secondary_private_ip_address_count is not None: + try: + connection.assign_private_ip_addresses( + aws_retry=True, + NetworkInterfaceId=eni["NetworkInterfaceId"], + SecondaryPrivateIpAddressCount=secondary_private_ip_address_count + ) + wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): + connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) + raise + + if secondary_private_ip_addresses is not None: + try: + connection.assign_private_ip_addresses( + NetworkInterfaceId=eni["NetworkInterfaceId"], + PrivateIpAddresses=secondary_private_ip_addresses + ) + wait_for(correct_ips, connection, secondary_private_ip_addresses, module, eni_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): + connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) + raise + + manage_tags(eni, name, tags, purge_tags, connection) + + # Refresh the eni data + eni = describe_eni(connection, module, eni_id) + changed = True + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws( + e, + "Failed to create eni {0} for {1} in {2} with {3}".format(name, subnet_id, vpc_id, private_ip_address) + ) + + module.exit_json(changed=changed, interface=get_eni_info(eni)) + + +def modify_eni(connection, module, eni): + + instance_id = module.params.get("instance_id") + attached = module.params.get("attached") + device_index = module.params.get("device_index") + description = module.params.get('description') + security_groups = 
module.params.get('security_groups') + force_detach = module.params.get("force_detach") + source_dest_check = module.params.get("source_dest_check") + delete_on_termination = module.params.get("delete_on_termination") + secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses") + purge_secondary_private_ip_addresses = module.params.get("purge_secondary_private_ip_addresses") + secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count") + allow_reassignment = module.params.get("allow_reassignment") + changed = False + tags = module.params.get("tags") + name = module.params.get("name") + purge_tags = module.params.get("purge_tags") + + eni = uniquely_find_eni(connection, module, eni) + eni_id = eni["NetworkInterfaceId"] + + try: + if description is not None: + if "Description" not in eni or eni["Description"] != description: + connection.modify_network_interface_attribute( + aws_retry=True, + NetworkInterfaceId=eni_id, + Description={'Value': description} + ) + changed = True + if len(security_groups) > 0: + groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=eni["VpcId"], boto3=True) + if sorted(get_sec_group_list(eni["Groups"])) != sorted(groups): + connection.modify_network_interface_attribute( + aws_retry=True, + NetworkInterfaceId=eni_id, + Groups=groups + ) + changed = True + if source_dest_check is not None: + if "SourceDestCheck" not in eni or eni["SourceDestCheck"] != source_dest_check: + connection.modify_network_interface_attribute( + aws_retry=True, + NetworkInterfaceId=eni_id, + SourceDestCheck={'Value': source_dest_check} + ) + changed = True + if delete_on_termination is not None and "Attachment" in eni: + if eni["Attachment"]["DeleteOnTermination"] is not delete_on_termination: + connection.modify_network_interface_attribute( + aws_retry=True, + NetworkInterfaceId=eni_id, + Attachment={'AttachmentId': eni["Attachment"]["AttachmentId"], + 'DeleteOnTermination': delete_on_termination} + ) + changed = True + if delete_on_termination: + waiter = "network_interface_delete_on_terminate" + else: + waiter = "network_interface_no_delete_on_terminate" + get_waiter(connection, waiter).wait(NetworkInterfaceIds=[eni_id]) + + current_secondary_addresses = [] + if "PrivateIpAddresses" in eni: + current_secondary_addresses = [i["PrivateIpAddress"] for i in eni["PrivateIpAddresses"] if not i["Primary"]] + + if secondary_private_ip_addresses is not None: + secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses)) + if secondary_addresses_to_remove and purge_secondary_private_ip_addresses: + connection.unassign_private_ip_addresses( + aws_retry=True, + NetworkInterfaceId=eni_id, + PrivateIpAddresses=list(set(current_secondary_addresses) - set(secondary_private_ip_addresses)), + ) + wait_for(absent_ips, connection, secondary_addresses_to_remove, module, eni_id) + changed = True + secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses)) + if secondary_addresses_to_add: + connection.assign_private_ip_addresses( + aws_retry=True, + NetworkInterfaceId=eni_id, + PrivateIpAddresses=secondary_addresses_to_add, + AllowReassignment=allow_reassignment + ) + wait_for(correct_ips, connection, secondary_addresses_to_add, module, eni_id) + changed = True + + if secondary_private_ip_address_count is not None: + current_secondary_address_count = len(current_secondary_addresses) + if secondary_private_ip_address_count > 
current_secondary_address_count: + connection.assign_private_ip_addresses( + aws_retry=True, + NetworkInterfaceId=eni_id, + SecondaryPrivateIpAddressCount=(secondary_private_ip_address_count - current_secondary_address_count), + AllowReassignment=allow_reassignment + ) + wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) + changed = True + elif secondary_private_ip_address_count < current_secondary_address_count: + # How many of these addresses do we want to remove + secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count + connection.unassign_private_ip_addresses( + aws_retry=True, + NetworkInterfaceId=eni_id, + PrivateIpAddresses=current_secondary_addresses[:secondary_addresses_to_remove_count] + ) + wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) + changed = True + + if attached is True: + if "Attachment" in eni and eni["Attachment"]["InstanceId"] != instance_id: + detach_eni(connection, eni, module) + connection.attach_network_interface( + aws_retry=True, + InstanceId=instance_id, + DeviceIndex=device_index, + NetworkInterfaceId=eni_id, + ) + get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + changed = True + if "Attachment" not in eni: + connection.attach_network_interface( + aws_retry=True, + InstanceId=instance_id, + DeviceIndex=device_index, + NetworkInterfaceId=eni_id, + ) + get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + changed = True + + elif attached is False: + changed |= detach_eni(connection, eni, module) + get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id]) + + changed |= manage_tags(eni, name, tags, purge_tags, connection) + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Failed to modify eni {0}".format(eni_id)) + + eni = describe_eni(connection, module, eni_id) + module.exit_json(changed=changed, interface=get_eni_info(eni)) + + +def delete_eni(connection, module): + + eni = uniquely_find_eni(connection, module) + if not eni: + module.exit_json(changed=False) + + eni_id = eni["NetworkInterfaceId"] + force_detach = module.params.get("force_detach") + + try: + if force_detach is True: + if "Attachment" in eni: + connection.detach_network_interface( + aws_retry=True, + AttachmentId=eni["Attachment"]["AttachmentId"], + Force=True + ) + # Wait to allow detachment to finish + get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id]) + connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) + changed = True + else: + connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) + changed = True + + module.exit_json(changed=changed) + except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'): + module.exit_json(changed=False) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "Failure during delete of {0}".format(eni_id)) + + +def detach_eni(connection, eni, module): + + attached = module.params.get("attached") + eni_id = eni["NetworkInterfaceId"] + + force_detach = module.params.get("force_detach") + if "Attachment" in eni: + connection.detach_network_interface( + aws_retry=True, + AttachmentId=eni["Attachment"]["AttachmentId"], + Force=force_detach + ) + get_waiter(connection, 
'network_interface_available').wait(NetworkInterfaceIds=[eni_id])
+        return True
+
+    return False
+
+
+def describe_eni(connection, module, eni_id):
+    try:
+        eni_result = connection.describe_network_interfaces(aws_retry=True, NetworkInterfaceIds=[eni_id])
+        if eni_result["NetworkInterfaces"]:
+            return eni_result["NetworkInterfaces"][0]
+        else:
+            return None
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, "Failed to describe eni with id: {0}".format(eni_id))
+
+
+def uniquely_find_eni(connection, module, eni=None):
+
+    if eni:
+        # In the case of create, eni_id will not be a param but we can still get the eni_id after creation
+        if "NetworkInterfaceId" in eni:
+            eni_id = eni["NetworkInterfaceId"]
+        else:
+            eni_id = None
+    else:
+        eni_id = module.params.get("eni_id")
+
+    private_ip_address = module.params.get('private_ip_address')
+    subnet_id = module.params.get('subnet_id')
+    instance_id = module.params.get('instance_id')
+    device_index = module.params.get('device_index')
+    attached = module.params.get('attached')
+    name = module.params.get("name")
+
+    filters = []
+
+    # proceed only if we're unequivocally specifying an ENI
+    if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None):
+        return None
+
+    if eni_id:
+        filters.append({'Name': 'network-interface-id',
+                        'Values': [eni_id]})
+
+    if private_ip_address and subnet_id and not filters:
+        filters.append({'Name': 'private-ip-address',
+                        'Values': [private_ip_address]})
+        filters.append({'Name': 'subnet-id',
+                        'Values': [subnet_id]})
+
+    if not attached and instance_id and device_index and not filters:
+        filters.append({'Name': 'attachment.instance-id',
+                        'Values': [instance_id]})
+        # Filter values must be strings; device_index is an int
+        filters.append({'Name': 'attachment.device-index',
+                        'Values': [str(device_index)]})
+
+    if name and subnet_id and not filters:
+        filters.append({'Name': 'tag:Name',
+                        'Values': [name]})
+        filters.append({'Name': 'subnet-id',
+                        'Values': [subnet_id]})
+
+    if not filters:
+        return None
+
+    try:
+        eni_result = connection.describe_network_interfaces(aws_retry=True, Filters=filters)["NetworkInterfaces"]
+        if len(eni_result) == 1:
+            return eni_result[0]
+        else:
+            return None
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, "Failed to find unique eni with filters: {0}".format(filters))
+
+    return None
+
+
+def get_sec_group_list(groups):
+
+    # Build list of remote security groups; keep the IDs as text so they
+    # compare cleanly against get_ec2_security_group_ids_from_names() output
+    remote_security_groups = []
+    for group in groups:
+        remote_security_groups.append(group["GroupId"])
+
+    return remote_security_groups
+
+
+def _get_vpc_id(connection, module, subnet_id):
+
+    try:
+        subnets = connection.describe_subnets(aws_retry=True, SubnetIds=[subnet_id])
+        return subnets["Subnets"][0]["VpcId"]
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, "Failed to get vpc_id for {0}".format(subnet_id))
+
+
+def manage_tags(eni, name, new_tags, purge_tags, connection):
+    changed = False
+
+    if "TagSet" in eni:
+        old_tags = boto3_tag_list_to_ansible_dict(eni['TagSet'])
+    elif new_tags:
+        old_tags = {}
+    else:
+        # No new tags and nothing in TagSet
+        return False
+
+    # Do not purge tags unless tags is not None
+    if new_tags is None:
+        purge_tags = False
+        new_tags = {}
+
+    if name:
+        new_tags['Name'] = name
+
+    tags_to_set, tags_to_delete = compare_aws_tags(
+        old_tags, new_tags,
+        purge_tags=purge_tags,
+    )
+    if tags_to_set:
+        connection.create_tags(
+            
aws_retry=True, + Resources=[eni['NetworkInterfaceId']], + Tags=ansible_dict_to_boto3_tag_list(tags_to_set)) + changed |= True + if tags_to_delete: + delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete) + connection.delete_tags( + aws_retry=True, + Resources=[eni['NetworkInterfaceId']], + Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values)) + changed |= True + return changed + + +def main(): + argument_spec = dict( + eni_id=dict(default=None, type='str'), + instance_id=dict(default=None, type='str'), + private_ip_address=dict(type='str'), + subnet_id=dict(type='str'), + description=dict(type='str'), + security_groups=dict(default=[], type='list', elements='str'), + device_index=dict(default=0, type='int'), + state=dict(default='present', choices=['present', 'absent']), + force_detach=dict(default='no', type='bool'), + source_dest_check=dict(default=None, type='bool'), + delete_on_termination=dict(default=None, type='bool'), + secondary_private_ip_addresses=dict(default=None, type='list', elements='str'), + purge_secondary_private_ip_addresses=dict(default=False, type='bool'), + secondary_private_ip_address_count=dict(default=None, type='int'), + allow_reassignment=dict(default=False, type='bool'), + attached=dict(default=None, type='bool'), + name=dict(default=None, type='str'), + tags=dict(type='dict'), + purge_tags=dict(default=True, type='bool') + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['secondary_private_ip_addresses', 'secondary_private_ip_address_count'] + ], + required_if=([ + ('attached', True, ['instance_id']), + ('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses']) + ]) + ) + + retry_decorator = AWSRetry.jittered_backoff( + catch_extra_error_codes=['IncorrectState'], + ) + connection = module.client('ec2', retry_decorator=retry_decorator) + state = module.params.get("state") + + if state == 'present': + eni = uniquely_find_eni(connection, module) + if eni is None: + subnet_id = module.params.get("subnet_id") + if subnet_id is None: + module.fail_json(msg='subnet_id is required when creating a new ENI') + + vpc_id = _get_vpc_id(connection, module, subnet_id) + create_eni(connection, vpc_id, module) + else: + modify_eni(connection, module, eni) + + elif state == 'absent': + delete_eni(connection, module) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_facts.py new file mode 100644 index 00000000..4741dfbc --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_facts.py @@ -0,0 +1,298 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_eni_info +version_added: 1.0.0 +short_description: Gather information about ec2 ENI interfaces in AWS +description: + - Gather information about ec2 ENI interfaces in AWS. + - This module was called C(ec2_eni_facts) before Ansible 2.9. The usage did not change. +author: "Rob White (@wimnat)" +requirements: [ boto3 ] +options: + eni_id: + description: + - The ID of the ENI. + - This option is mutually exclusive of I(filters). + type: str + version_added: 1.3.0 + filters: + description: + - A dict of filters to apply. 
Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters. + - This option is mutually exclusive of I(eni_id). + type: dict +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all ENIs +- amazon.aws.ec2_eni_info: + +# Gather information about a particular ENI +- amazon.aws.ec2_eni_info: + filters: + network-interface-id: eni-xxxxxxx + +''' + +RETURN = ''' +network_interfaces: + description: List of matching elastic network interfaces + returned: always + type: complex + contains: + association: + description: Info of associated elastic IP (EIP) + returned: When an ENI is associated with an EIP + type: dict + sample: { + allocation_id: "eipalloc-5sdf123", + association_id: "eipassoc-8sdf123", + ip_owner_id: "4415120123456", + public_dns_name: "ec2-52-1-0-63.compute-1.amazonaws.com", + public_ip: "52.1.0.63" + } + attachment: + description: Info about attached ec2 instance + returned: When an ENI is attached to an ec2 instance + type: dict + sample: { + attach_time: "2017-08-05T15:25:47+00:00", + attachment_id: "eni-attach-149d21234", + delete_on_termination: false, + device_index: 1, + instance_id: "i-15b8d3cadbafa1234", + instance_owner_id: "4415120123456", + status: "attached" + } + availability_zone: + description: Availability zone of ENI + returned: always + type: str + sample: "us-east-1b" + description: + description: Description text for ENI + returned: always + type: str + sample: "My favourite network interface" + groups: + description: List of attached security groups + returned: always + type: list + sample: [ + { + group_id: "sg-26d0f1234", + group_name: "my_ec2_security_group" + } + ] + id: + description: The id of the ENI (alias for network_interface_id) + returned: always + type: str + sample: "eni-392fsdf" + interface_type: + description: Type of the network interface + returned: always + type: str + sample: "interface" + ipv6_addresses: + description: List of IPv6 addresses for this interface + returned: always + type: list + sample: [] + mac_address: + description: MAC address of the network interface + returned: always + type: str + sample: "0a:f8:10:2f:ab:a1" + name: + description: The Name tag of the ENI, often displayed in the AWS UIs as Name + returned: When a Name tag has been set + type: str + version_added: 1.3.0 + network_interface_id: + description: The id of the ENI + returned: always + type: str + sample: "eni-392fsdf" + owner_id: + description: AWS account id of the owner of the ENI + returned: always + type: str + sample: "4415120123456" + private_dns_name: + description: Private DNS name for the ENI + returned: always + type: str + sample: "ip-172-16-1-180.ec2.internal" + private_ip_address: + description: Private IP address for the ENI + returned: always + type: str + sample: "172.16.1.180" + private_ip_addresses: + description: List of private IP addresses attached to the ENI + returned: always + type: list + sample: [] + requester_id: + description: The ID of the entity that launched the ENI + returned: always + type: str + sample: "AIDAIONYVJQNIAZFT3ABC" + requester_managed: + description: Indicates whether the network interface is being managed by an AWS service. 
+      returned: always
+      type: bool
+      sample: false
+    source_dest_check:
+      description: Indicates whether the network interface performs source/destination checking.
+      returned: always
+      type: bool
+      sample: false
+    status:
+      description: Indicates whether the network interface is attached to an instance or not
+      returned: always
+      type: str
+      sample: "in-use"
+    subnet_id:
+      description: Subnet ID the ENI is in
+      returned: always
+      type: str
+      sample: "subnet-7bbf01234"
+    tags:
+      description: Dictionary of tags added to the ENI
+      returned: always
+      type: dict
+      sample: {}
+      version_added: 1.3.0
+    tag_set:
+      description: Dictionary of tags added to the ENI
+      returned: always
+      type: dict
+      sample: {}
+    vpc_id:
+      description: ID of the VPC the network interface is part of
+      returned: always
+      type: str
+      sample: "vpc-b3f1f123"
+'''
+
+try:
+    from botocore.exceptions import ClientError
+    from botocore.exceptions import NoCredentialsError
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_eni(connection, module):
+
+    params = {}
+    # Options are mutually exclusive
+    if module.params.get("eni_id"):
+        params['NetworkInterfaceIds'] = [module.params.get("eni_id")]
+    elif module.params.get("filters"):
+        params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+    else:
+        params['Filters'] = []
+
+    try:
+        network_interfaces_result = connection.describe_network_interfaces(aws_retry=True, **params)['NetworkInterfaces']
+    except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'):
+        module.exit_json(network_interfaces=[])
+    except (ClientError, NoCredentialsError) as e:
+        module.fail_json_aws(e)
+
+    # Modify boto3 tags list to be ansible friendly dict and then camel_case
+    camel_network_interfaces = []
+    for network_interface in network_interfaces_result:
+        network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet'])
+        network_interface['Tags'] = network_interface['TagSet']
+        if 'Name' in network_interface['Tags']:
+            network_interface['Name'] = network_interface['Tags']['Name']
+        # Added id to interface info to be compatible with return values of ec2_eni module:
+        network_interface['Id'] = network_interface['NetworkInterfaceId']
+        camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface, ignore_list=['Tags', 'TagSet']))
+
+    module.exit_json(network_interfaces=camel_network_interfaces)
+
+
+# NOTE: legacy boto2-style helper; it is not referenced anywhere else in this module.
+def get_eni_info(interface):
+
+    # Private addresses
+    private_addresses = []
+    for ip in interface.private_ip_addresses:
+        private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
+
+    interface_info = {'id': interface.id,
+                      'subnet_id': interface.subnet_id,
+                      'vpc_id': interface.vpc_id,
+                      'description': interface.description,
+                      'owner_id': interface.owner_id,
+                      'status': interface.status,
+                      'mac_address': interface.mac_address,
+                      'private_ip_address': interface.private_ip_address,
+                      'source_dest_check': interface.source_dest_check,
+                      'groups': dict((group.id, group.name) for group in interface.groups),
+                      'private_ip_addresses': private_addresses
+                      }
+
+    if hasattr(interface, 'publicDnsName'):
+        interface_info['association'] = {'public_ip_address': interface.publicIp,
+                                         'public_dns_name': interface.publicDnsName,
+                                         'ip_owner_id': interface.ipOwnerId
+                                         }
+
+    if interface.attachment is not None:
+        interface_info['attachment'] = {'attachment_id': interface.attachment.id,
+                                        'instance_id': interface.attachment.instance_id,
+                                        'device_index': interface.attachment.device_index,
+                                        'status': interface.attachment.status,
+                                        'attach_time': interface.attachment.attach_time,
+                                        'delete_on_termination': interface.attachment.delete_on_termination,
+                                        }
+
+    return interface_info
+
+
+def main():
+    argument_spec = dict(
+        eni_id=dict(type='str'),
+        filters=dict(default=None, type='dict')
+    )
+    mutually_exclusive = [
+        ['eni_id', 'filters']
+    ]
+
+    module = AnsibleAWSModule(argument_spec=argument_spec,
+                              mutually_exclusive=mutually_exclusive,
+                              supports_check_mode=True)
+    if module._name == 'ec2_eni_facts':
+        module.deprecate("The 'ec2_eni_facts' module has been renamed to 'ec2_eni_info'", date='2021-12-01', collection_name='amazon.aws')
+
+    connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+    list_eni(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
new file mode 100644
index 00000000..4741dfbc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eni_info
+version_added: 1.0.0
+short_description: Gather information about ec2 ENI interfaces in AWS
+description:
+    - Gather information about ec2 ENI interfaces in AWS.
+    - This module was called C(ec2_eni_facts) before Ansible 2.9. The usage did not change.
+author: "Rob White (@wimnat)"
+requirements: [ boto3 ]
+options:
+  eni_id:
+    description:
+      - The ID of the ENI.
+      - This option is mutually exclusive with I(filters).
+    type: str
+    version_added: 1.3.0
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
+      - This option is mutually exclusive with I(eni_id).
+    type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
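+
+# Editor's illustrative addition: gather information about all ENIs in one
+# subnet (subnet-xxxxxxx is a placeholder ID).
+- amazon.aws.ec2_eni_info:
+    filters:
+      subnet-id: subnet-xxxxxxx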
+ +# Gather information about all ENIs +- amazon.aws.ec2_eni_info: + +# Gather information about a particular ENI +- amazon.aws.ec2_eni_info: + filters: + network-interface-id: eni-xxxxxxx + +''' + +RETURN = ''' +network_interfaces: + description: List of matching elastic network interfaces + returned: always + type: complex + contains: + association: + description: Info of associated elastic IP (EIP) + returned: When an ENI is associated with an EIP + type: dict + sample: { + allocation_id: "eipalloc-5sdf123", + association_id: "eipassoc-8sdf123", + ip_owner_id: "4415120123456", + public_dns_name: "ec2-52-1-0-63.compute-1.amazonaws.com", + public_ip: "52.1.0.63" + } + attachment: + description: Info about attached ec2 instance + returned: When an ENI is attached to an ec2 instance + type: dict + sample: { + attach_time: "2017-08-05T15:25:47+00:00", + attachment_id: "eni-attach-149d21234", + delete_on_termination: false, + device_index: 1, + instance_id: "i-15b8d3cadbafa1234", + instance_owner_id: "4415120123456", + status: "attached" + } + availability_zone: + description: Availability zone of ENI + returned: always + type: str + sample: "us-east-1b" + description: + description: Description text for ENI + returned: always + type: str + sample: "My favourite network interface" + groups: + description: List of attached security groups + returned: always + type: list + sample: [ + { + group_id: "sg-26d0f1234", + group_name: "my_ec2_security_group" + } + ] + id: + description: The id of the ENI (alias for network_interface_id) + returned: always + type: str + sample: "eni-392fsdf" + interface_type: + description: Type of the network interface + returned: always + type: str + sample: "interface" + ipv6_addresses: + description: List of IPv6 addresses for this interface + returned: always + type: list + sample: [] + mac_address: + description: MAC address of the network interface + returned: always + type: str + sample: "0a:f8:10:2f:ab:a1" + name: + description: The Name tag of the ENI, often displayed in the AWS UIs as Name + returned: When a Name tag has been set + type: str + version_added: 1.3.0 + network_interface_id: + description: The id of the ENI + returned: always + type: str + sample: "eni-392fsdf" + owner_id: + description: AWS account id of the owner of the ENI + returned: always + type: str + sample: "4415120123456" + private_dns_name: + description: Private DNS name for the ENI + returned: always + type: str + sample: "ip-172-16-1-180.ec2.internal" + private_ip_address: + description: Private IP address for the ENI + returned: always + type: str + sample: "172.16.1.180" + private_ip_addresses: + description: List of private IP addresses attached to the ENI + returned: always + type: list + sample: [] + requester_id: + description: The ID of the entity that launched the ENI + returned: always + type: str + sample: "AIDAIONYVJQNIAZFT3ABC" + requester_managed: + description: Indicates whether the network interface is being managed by an AWS service. + returned: always + type: bool + sample: false + source_dest_check: + description: Indicates whether the network interface performs source/destination checking. 
+      returned: always
+      type: bool
+      sample: false
+    status:
+      description: Indicates whether the network interface is attached to an instance or not
+      returned: always
+      type: str
+      sample: "in-use"
+    subnet_id:
+      description: Subnet ID the ENI is in
+      returned: always
+      type: str
+      sample: "subnet-7bbf01234"
+    tags:
+      description: Dictionary of tags added to the ENI
+      returned: always
+      type: dict
+      sample: {}
+      version_added: 1.3.0
+    tag_set:
+      description: Dictionary of tags added to the ENI
+      returned: always
+      type: dict
+      sample: {}
+    vpc_id:
+      description: ID of the VPC the network interface is part of
+      returned: always
+      type: str
+      sample: "vpc-b3f1f123"
+'''
+
+try:
+    from botocore.exceptions import ClientError
+    from botocore.exceptions import NoCredentialsError
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_eni(connection, module):
+
+    params = {}
+    # Options are mutually exclusive
+    if module.params.get("eni_id"):
+        params['NetworkInterfaceIds'] = [module.params.get("eni_id")]
+    elif module.params.get("filters"):
+        params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+    else:
+        params['Filters'] = []
+
+    try:
+        network_interfaces_result = connection.describe_network_interfaces(aws_retry=True, **params)['NetworkInterfaces']
+    except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'):
+        module.exit_json(network_interfaces=[])
+    except (ClientError, NoCredentialsError) as e:
+        module.fail_json_aws(e)
+
+    # Modify boto3 tags list to be ansible friendly dict and then camel_case
+    camel_network_interfaces = []
+    for network_interface in network_interfaces_result:
+        network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet'])
+        network_interface['Tags'] = network_interface['TagSet']
+        if 'Name' in network_interface['Tags']:
+            network_interface['Name'] = network_interface['Tags']['Name']
+        # Added id to interface info to be compatible with return values of ec2_eni module:
+        network_interface['Id'] = network_interface['NetworkInterfaceId']
+        camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface, ignore_list=['Tags', 'TagSet']))
+
+    module.exit_json(network_interfaces=camel_network_interfaces)
+
+
+# NOTE: legacy boto2-style helper; it is not referenced anywhere else in this module.
+def get_eni_info(interface):
+
+    # Private addresses
+    private_addresses = []
+    for ip in interface.private_ip_addresses:
+        private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
+
+    interface_info = {'id': interface.id,
+                      'subnet_id': interface.subnet_id,
+                      'vpc_id': interface.vpc_id,
+                      'description': interface.description,
+                      'owner_id': interface.owner_id,
+                      'status': interface.status,
+                      'mac_address': interface.mac_address,
+                      'private_ip_address': interface.private_ip_address,
+                      'source_dest_check': interface.source_dest_check,
+                      'groups': dict((group.id, group.name) for group in interface.groups),
+                      'private_ip_addresses': private_addresses
+                      }
+
+    if hasattr(interface, 'publicDnsName'):
+        interface_info['association'] = {'public_ip_address': interface.publicIp,
+                                         'public_dns_name': interface.publicDnsName,
+                                         'ip_owner_id': interface.ipOwnerId
+                                         }
+
+    if interface.attachment is not None:
+        interface_info['attachment'] = {'attachment_id': interface.attachment.id,
+                                        'instance_id': interface.attachment.instance_id,
+                                        'device_index': interface.attachment.device_index,
+                                        'status': interface.attachment.status,
+                                        'attach_time': interface.attachment.attach_time,
+                                        'delete_on_termination': interface.attachment.delete_on_termination,
+                                        }
+
+    return interface_info
+
+
+def main():
+    argument_spec = dict(
+        eni_id=dict(type='str'),
+        filters=dict(default=None, type='dict')
+    )
+    mutually_exclusive = [
+        ['eni_id', 'filters']
+    ]
+
+    module = AnsibleAWSModule(argument_spec=argument_spec,
+                              mutually_exclusive=mutually_exclusive,
+                              supports_check_mode=True)
+    if module._name == 'ec2_eni_facts':
+        module.deprecate("The 'ec2_eni_facts' module has been renamed to 'ec2_eni_info'", date='2021-12-01', collection_name='amazon.aws')
+
+    connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+    list_eni(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group.py
new file mode 100644
index 00000000..2338aa69
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group.py
@@ -0,0 +1,1380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_group
+version_added: 1.0.0
+author: "Andrew de Quincey (@adq)"
+requirements: [ boto3 ]
+short_description: Maintain an ec2 VPC security group.
+description:
+  - Maintains ec2 security groups.
+options:
+  name:
+    description:
+      - Name of the security group.
+      - One of and only one of I(name) or I(group_id) is required.
+      - Required if I(state=present).
+    required: false
+    type: str
+  group_id:
+    description:
+      - Id of group to delete (works only with absent).
+      - One of and only one of I(name) or I(group_id) is required.
+    required: false
+    type: str
+  description:
+    description:
+      - Description of the security group. Required when C(state) is C(present).
+    required: false
+    type: str
+  vpc_id:
+    description:
+      - ID of the VPC to create the group in.
+    required: false
+    type: str
+  rules:
+    description:
+      - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+        no inbound rules will be enabled. A rule may reference the group's own name in C(group_name);
+        this allows idempotent loopback additions (e.g. allowing a group to access itself).
+        Rule sources list support was added in version 2.4; it allows multiple sources per
+        source type, as well as multiple source types per rule. Prior to 2.4, only a single source
+        per rule was allowed. Support for rule descriptions was added in version 2.5.
+    required: false
+    type: list
+    elements: dict
+    suboptions:
+      cidr_ip:
+        type: str
+        description:
+          - The IPv4 CIDR range traffic is coming from.
+          - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+            and I(group_name).
+      cidr_ipv6:
+        type: str
+        description:
+          - The IPv6 CIDR range traffic is coming from.
+          - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+            and I(group_name).
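+      # --- Editor's illustrative sketch (not part of the module source) ---
+      # The "only one source per rule" constraint above is enforced by
+      # validate_rule() further down; in essence:
+      #
+      #   SOURCES = ('cidr_ip', 'cidr_ipv6', 'ip_prefix', 'group_id', 'group_name')
+      #   def check_single_source(rule):
+      #       if len([k for k in SOURCES if k in rule]) > 1:
+      #           raise ValueError('Specify only one source per rule: %s' % rule)
+      # --- end sketch ---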
+ ip_prefix: + type: str + description: + - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) + that traffic is coming from. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_id: + type: str + description: + - The ID of the Security Group that traffic is coming from. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_name: + type: str + description: + - Name of the Security Group that traffic is coming from. + - If the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_desc: + type: str + description: + - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + proto: + type: str + description: + - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) + from_port: + type: int + description: The start of the range of ports that traffic is coming from. A value of C(-1) indicates all ports. + to_port: + type: int + description: The end of the range of ports that traffic is coming from. A value of C(-1) indicates all ports. + rule_desc: + type: str + description: A description for the rule. + rules_egress: + description: + - List of firewall outbound rules to enforce in this group (see example). If none are supplied, + a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled. + Rule Egress sources list support was added in version 2.4. In version 2.5 support for rule descriptions + was added. + required: false + type: list + elements: dict + suboptions: + cidr_ip: + type: str + description: + - The IPv4 CIDR range traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + cidr_ipv6: + type: str + description: + - The IPv6 CIDR range traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + ip_prefix: + type: str + description: + - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) + that traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_id: + type: str + description: + - The ID of the Security Group that traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_name: + type: str + description: + - Name of the Security Group that traffic is going to. + - If the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_desc: + type: str + description: + - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + proto: + type: str + description: + - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) + from_port: + type: int + description: The start of the range of ports that traffic is going to. 
A value of C(-1) indicates all ports. + to_port: + type: int + description: The end of the range of ports that traffic is going to. A value of C(-1) indicates all ports. + rule_desc: + type: str + description: A description for the rule. + state: + description: + - Create or delete a security group. + required: false + default: 'present' + choices: [ "present", "absent" ] + aliases: [] + type: str + purge_rules: + description: + - Purge existing rules on security group that are not found in rules. + required: false + default: 'true' + aliases: [] + type: bool + purge_rules_egress: + description: + - Purge existing rules_egress on security group that are not found in rules_egress. + required: false + default: 'true' + aliases: [] + type: bool + tags: + description: + - A dictionary of one or more tags to assign to the security group. + required: false + type: dict + aliases: ['resource_tags'] + purge_tags: + description: + - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then + tags will not be modified. + required: false + default: yes + type: bool + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + + +notes: + - If a rule declares a group_name and that group doesn't exist, it will be + automatically created. In that case, group_desc should be provided as well. + The module will refuse to create a depended-on group without a description. + - Preview diff mode support is added in version 2.7. +''' + +EXAMPLES = ''' +- name: example using security group rule descriptions + amazon.aws.ec2_group: + name: "{{ name }}" + description: sg with rule descriptions + vpc_id: vpc-xxxxxxxx + profile: "{{ aws_profile }}" + region: us-east-1 + rules: + - proto: tcp + ports: + - 80 + cidr_ip: 0.0.0.0/0 + rule_desc: allow all on port 80 + +- name: example ec2 group + amazon.aws.ec2_group: + name: example + description: an example EC2 group + vpc_id: 12345 + region: eu-west-1 + aws_secret_key: SECRET + aws_access_key: ACCESS + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 10.0.0.0/8 + - proto: tcp + from_port: 443 + to_port: 443 + # this should only be needed for EC2 Classic security group rules + # because in a VPC an ELB will use a user-account security group + group_id: amazon-elb/sg-87654321/amazon-elb-sg + - proto: tcp + from_port: 3306 + to_port: 3306 + group_id: 123412341234/sg-87654321/exact-name-of-sg + - proto: udp + from_port: 10050 + to_port: 10050 + cidr_ip: 10.0.0.0/8 + - proto: udp + from_port: 10051 + to_port: 10051 + group_id: sg-12345678 + - proto: icmp + from_port: 8 # icmp type, -1 = any type + to_port: -1 # icmp subtype, -1 = any subtype + cidr_ip: 10.0.0.0/8 + - proto: all + # the containing group name may be specified here + group_name: example + - proto: all + # in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6), + # traffic on all ports is allowed, regardless of any ports you specify + from_port: 10050 # this value is ignored + to_port: 10050 # this value is ignored + cidr_ip: 10.0.0.0/8 + + rules_egress: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + cidr_ipv6: 64:ff9b::/96 + group_name: example-other + # description to use if example-other needs to be created + group_desc: other example EC2 group + +- name: example2 ec2 group + amazon.aws.ec2_group: + name: example2 + description: an example2 EC2 
group + vpc_id: 12345 + region: eu-west-1 + rules: + # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port). + - proto: tcp + ports: 22 + group_name: example-vpn + - proto: tcp + ports: + - 80 + - 443 + - 8080-8099 + cidr_ip: 0.0.0.0/0 + # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule. + - proto: tcp + ports: + - 6379 + - 26379 + group_name: + - example-vpn + - example-redis + - proto: tcp + ports: 5665 + group_name: example-vpn + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + cidr_ipv6: + - 2607:F8B0::/32 + - 64:ff9b::/96 + group_id: + - sg-edcd9784 + diff: True + +- name: "Delete group by its id" + amazon.aws.ec2_group: + region: eu-west-1 + group_id: sg-33b4ee5b + state: absent +''' + +RETURN = ''' +group_name: + description: Security group name + sample: My Security Group + type: str + returned: on create/update +group_id: + description: Security group id + sample: sg-abcd1234 + type: str + returned: on create/update +description: + description: Description of security group + sample: My Security Group + type: str + returned: on create/update +tags: + description: Tags associated with the security group + sample: + Name: My Security Group + Purpose: protecting stuff + type: dict + returned: on create/update +vpc_id: + description: ID of VPC to which the security group belongs + sample: vpc-abcd1234 + type: str + returned: on create/update +ip_permissions: + description: Inbound rules associated with the security group. + sample: + - from_port: 8182 + ip_protocol: tcp + ip_ranges: + - cidr_ip: "198.51.100.1/32" + ipv6_ranges: [] + prefix_list_ids: [] + to_port: 8182 + user_id_group_pairs: [] + type: list + returned: on create/update +ip_permissions_egress: + description: Outbound rules associated with the security group. 
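+    # --- Editor's illustrative sketch (not part of the module source) ---
+    # The 'ports' keyword used in the examples above accepts single ports and
+    # 'from-to' ranges; ports_expand() below normalises them into tuples:
+    #
+    #   ports_expand([22, '80', '8080-8099'])
+    #   # -> [(22, 22), (80, 80), (8080, 8099)]
+    # --- end sketch ---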
+ sample: + - ip_protocol: -1 + ip_ranges: + - cidr_ip: "0.0.0.0/0" + ipv6_ranges: [] + prefix_list_ids: [] + user_id_group_pairs: [] + type: list + returned: on create/update +owner_id: + description: AWS Account ID of the security group + sample: 123456789012 + type: int + returned: on create/update +''' + +import json +import re +import itertools +from copy import deepcopy +from time import sleep +from collections import namedtuple + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.network import to_ipv6_subnet +from ansible.module_utils.common.network import to_subnet +from ansible.module_utils.six import string_types +from ansible_collections.ansible.netcommon.plugins.module_utils.compat.ipaddress import IPv6Network +from ansible_collections.ansible.netcommon.plugins.module_utils.compat.ipaddress import ip_network + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.core import is_boto3_error_code +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ..module_utils.ec2 import compare_aws_tags +from ..module_utils.iam import get_aws_account_id +from ..module_utils.waiters import get_waiter + + +Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description']) +valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix']) +current_account_id = None + + +def rule_cmp(a, b): + """Compare rules without descriptions""" + for prop in ['port_range', 'protocol', 'target', 'target_type']: + if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol): + # equal protocols can interchange `(-1, -1)` and `(None, None)` + if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)): + continue + elif getattr(a, prop) != getattr(b, prop): + return False + elif getattr(a, prop) != getattr(b, prop): + return False + return True + + +def rules_to_permissions(rules): + return [to_permission(rule) for rule in rules] + + +def to_permission(rule): + # take a Rule, output the serialized grant + perm = { + 'IpProtocol': rule.protocol, + } + perm['FromPort'], perm['ToPort'] = rule.port_range + if rule.target_type == 'ipv4': + perm['IpRanges'] = [{ + 'CidrIp': rule.target, + }] + if rule.description: + perm['IpRanges'][0]['Description'] = rule.description + elif rule.target_type == 'ipv6': + perm['Ipv6Ranges'] = [{ + 'CidrIpv6': rule.target, + }] + if rule.description: + perm['Ipv6Ranges'][0]['Description'] = rule.description + elif rule.target_type == 'group': + if isinstance(rule.target, tuple): + pair = {} + if rule.target[0]: + pair['UserId'] = rule.target[0] + # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific + if rule.target[1]: + pair['GroupId'] = rule.target[1] + elif rule.target[2]: + pair['GroupName'] = rule.target[2] + perm['UserIdGroupPairs'] = [pair] + else: + perm['UserIdGroupPairs'] = [{ + 'GroupId': rule.target + }] + if rule.description: + perm['UserIdGroupPairs'][0]['Description'] = rule.description + elif rule.target_type == 'ip_prefix': + perm['PrefixListIds'] = [{ + 'PrefixListId': rule.target, + }] + if rule.description: + 
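+            # --- Editor's illustrative sketch (not part of the module source) ---
+            # rule_cmp() above treats (-1, -1) and (None, None) as the same port
+            # range when the protocols match, and ignores descriptions:
+            #
+            #   a = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+            #   b = Rule((-1, -1), '-1', '0.0.0.0/0', 'ipv4', 'allow all')
+            #   rule_cmp(a, b)  # -> True
+            # --- end sketch ---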
perm['PrefixListIds'][0]['Description'] = rule.description + elif rule.target_type not in valid_targets: + raise ValueError('Invalid target type for rule {0}'.format(rule)) + return fix_port_and_protocol(perm) + + +def rule_from_group_permission(perm): + """ + Returns a rule dict from an existing security group. + + When using a security group as a target all 3 fields (OwnerId, GroupId, and + GroupName) need to exist in the target. This ensures consistency of the + values that will be compared to desired_ingress or desired_egress + in wait_for_rule_propagation(). + GroupId is preferred as it is more specific except when targeting 'amazon-' + prefixed security groups (such as EC2 Classic ELBs). + """ + def ports_from_permission(p): + if 'FromPort' not in p and 'ToPort' not in p: + return (None, None) + return (int(perm['FromPort']), int(perm['ToPort'])) + + # outputs a rule tuple + for target_key, target_subkey, target_type in [ + ('IpRanges', 'CidrIp', 'ipv4'), + ('Ipv6Ranges', 'CidrIpv6', 'ipv6'), + ('PrefixListIds', 'PrefixListId', 'ip_prefix'), + ]: + if target_key not in perm: + continue + for r in perm[target_key]: + # there may be several IP ranges here, which is ok + yield Rule( + ports_from_permission(perm), + to_text(perm['IpProtocol']), + r[target_subkey], + target_type, + r.get('Description') + ) + if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']: + for pair in perm['UserIdGroupPairs']: + target = ( + pair.get('UserId', current_account_id), + pair.get('GroupId', None), + None, + ) + if pair.get('UserId', '').startswith('amazon-'): + # amazon-elb and amazon-prefix rules don't need + # group-id specified, so remove it when querying + # from permission + target = ( + pair.get('UserId', None), + None, + pair.get('GroupName', None), + ) + elif 'VpcPeeringConnectionId' not in pair and pair['UserId'] != current_account_id: + # EC2-Classic cross-account + pass + elif 'VpcPeeringConnectionId' in pair: + # EC2-VPC cross-account VPC peering + target = ( + pair.get('UserId', None), + pair.get('GroupId', None), + None, + ) + + yield Rule( + ports_from_permission(perm), + to_text(perm['IpProtocol']), + target, + 'group', + pair.get('Description') + ) + + +# Wrap just this method so we can retry on missing groups +@AWSRetry.jittered_backoff(retries=5, delay=5, catch_extra_error_codes=['InvalidGroup.NotFound']) +def get_security_groups_with_backoff(client, **kwargs): + return client.describe_security_groups(**kwargs) + + +def sg_exists_with_backoff(client, **kwargs): + try: + return client.describe_security_groups(aws_retry=True, **kwargs) + except is_boto3_error_code('InvalidGroup.NotFound'): + return {'SecurityGroups': []} + + +def deduplicate_rules_args(rules): + """Returns unique rules""" + if rules is None: + return None + return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values()) + + +def validate_rule(module, rule): + VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', 'ip_prefix', + 'group_id', 'group_name', 'group_desc', + 'proto', 'from_port', 'to_port', 'rule_desc') + if not isinstance(rule, dict): + module.fail_json(msg='Invalid rule parameter type [%s].' 
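+        # --- Editor's illustrative sketch (not part of the module source) ---
+        # rule_from_group_permission() above yields one Rule per grant, e.g.
+        #   {'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80,
+        #    'IpRanges': [{'CidrIp': '10.0.0.0/8'}, {'CidrIp': '172.16.0.0/12'}]}
+        # yields:
+        #   Rule((80, 80), 'tcp', '10.0.0.0/8', 'ipv4', None)
+        #   Rule((80, 80), 'tcp', '172.16.0.0/12', 'ipv4', None)
+        # --- end sketch ---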
% type(rule)) + for k in rule: + if k not in VALID_PARAMS: + module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule)) + + if 'group_id' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_id OR cidr_ip, not both') + elif 'group_name' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_name OR cidr_ip, not both') + elif 'group_id' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_id OR cidr_ipv6, not both") + elif 'group_name' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_name OR cidr_ipv6, not both") + elif 'cidr_ip' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both") + elif 'group_id' in rule and 'group_name' in rule: + module.fail_json(msg='Specify group_id OR group_name, not both') + + +def get_target_from_rule(module, client, rule, name, group, groups, vpc_id): + """ + Returns tuple of (target_type, target, group_created) after validating rule params. + + rule: Dict describing a rule. + name: Name of the security group being managed. + groups: Dict of all available security groups. + + AWS accepts an ip range or a security group as target of a rule. This + function validate the rule specification and return either a non-None + group_id or a non-None ip range. + + When using a security group as a target all 3 fields (OwnerId, GroupId, and + GroupName) need to exist in the target. This ensures consistency of the + values that will be compared to current_rules (from current_ingress and + current_egress) in wait_for_rule_propagation(). + """ + FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)' + owner_id = current_account_id + group_id = None + group_name = None + target_group_created = False + + validate_rule(module, rule) + if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']): + # this is a foreign Security Group. 
Since you can't fetch it you must create an instance of it + # Matches on groups like amazon-elb/sg-5a9c116a/amazon-elb-sg, amazon-elb/amazon-elb-sg, + # and peer-VPC groups like 0987654321/sg-1234567890/example + owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups() + group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name) + groups[group_id] = group_instance + groups[group_name] = group_instance + if group_id and group_name: + if group_name.startswith('amazon-'): + # amazon-elb and amazon-prefix rules don't need group_id specified, + group_id = None + else: + # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific + group_name = None + return 'group', (owner_id, group_id, group_name), False + elif 'group_id' in rule: + return 'group', (owner_id, rule['group_id'], None), False + elif 'group_name' in rule: + group_name = rule['group_name'] + if group_name == name: + group_id = group['GroupId'] + groups[group_id] = group + groups[group_name] = group + elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'): + # both are VPC groups, this is ok + group_id = groups[group_name]['GroupId'] + elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')): + # both are EC2 classic, this is ok + group_id = groups[group_name]['GroupId'] + else: + auto_group = None + filters = {'group-name': group_name} + if vpc_id: + filters['vpc-id'] = vpc_id + # if we got here, either the target group does not exist, or there + # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC + # is bad, so we have to create a new SG because no compatible group + # exists + if not rule.get('group_desc', '').strip(): + # retry describing the group once + try: + auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] + except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError): + module.fail_json(msg="group %s will be automatically created by rule %s but " + "no description was provided" % (group_name, rule)) + except ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + elif not module.check_mode: + params = dict(GroupName=group_name, Description=rule['group_desc']) + if vpc_id: + params['VpcId'] = vpc_id + try: + auto_group = client.create_security_group(aws_retry=True, **params) + get_waiter( + client, 'security_group_exists', + ).wait( + GroupIds=[auto_group['GroupId']], + ) + except is_boto3_error_code('InvalidGroup.Duplicate'): + # The group exists, but didn't show up in any of our describe-security-groups calls + # Try searching on a filter for the name, and allow a retry window for AWS to update + # the model on their end. + try: + auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] + except IndexError as e: + module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name)) + except ClientError as e: + module.fail_json_aws( + e, + msg="Could not create or use existing group '{0}' in rule. 
Make sure the group exists".format(group_name)) + if auto_group is not None: + group_id = auto_group['GroupId'] + groups[group_id] = auto_group + groups[group_name] = auto_group + target_group_created = True + return 'group', (owner_id, group_id, None), target_group_created + elif 'cidr_ip' in rule: + return 'ipv4', validate_ip(module, rule['cidr_ip']), False + elif 'cidr_ipv6' in rule: + return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False + elif 'ip_prefix' in rule: + return 'ip_prefix', rule['ip_prefix'], False + + module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule) + + +def ports_expand(ports): + # takes a list of ports and returns a list of (port_from, port_to) + ports_expanded = [] + for port in ports: + if not isinstance(port, string_types): + ports_expanded.append((port,) * 2) + elif '-' in port: + ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1))) + else: + ports_expanded.append((int(port.strip()),) * 2) + + return ports_expanded + + +def rule_expand_ports(rule): + # takes a rule dict and returns a list of expanded rule dicts + if 'ports' not in rule: + if isinstance(rule.get('from_port'), string_types): + rule['from_port'] = int(rule.get('from_port')) + if isinstance(rule.get('to_port'), string_types): + rule['to_port'] = int(rule.get('to_port')) + return [rule] + + ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']] + + rule_expanded = [] + for from_to in ports_expand(ports): + temp_rule = rule.copy() + del temp_rule['ports'] + temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to) + rule_expanded.append(temp_rule) + + return rule_expanded + + +def rules_expand_ports(rules): + # takes a list of rules and expands it based on 'ports' + if not rules: + return rules + + return [rule for rule_complex in rules + for rule in rule_expand_ports(rule_complex)] + + +def rule_expand_source(rule, source_type): + # takes a rule dict and returns a list of expanded rule dicts for specified source_type + sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]] + source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') + + rule_expanded = [] + for source in sources: + temp_rule = rule.copy() + for s in source_types_all: + temp_rule.pop(s, None) + temp_rule[source_type] = source + rule_expanded.append(temp_rule) + + return rule_expanded + + +def rule_expand_sources(rule): + # takes a rule dict and returns a list of expanded rule discts + source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule) + + return [r for stype in source_types + for r in rule_expand_source(rule, stype)] + + +def rules_expand_sources(rules): + # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name' + if not rules: + return rules + + return [rule for rule_complex in rules + for rule in rule_expand_sources(rule_complex)] + + +def update_rules_description(module, client, rule_type, group_id, ip_permissions): + if module.check_mode: + return + try: + if rule_type == "in": + client.update_security_group_rule_descriptions_ingress( + aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + if rule_type == "out": + client.update_security_group_rule_descriptions_egress( + aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id) + + +def 
fix_port_and_protocol(permission): + for key in ('FromPort', 'ToPort'): + if key in permission: + if permission[key] is None: + del permission[key] + else: + permission[key] = int(permission[key]) + + permission['IpProtocol'] = to_text(permission['IpProtocol']) + + return permission + + +def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id): + if revoke_ingress: + revoke(client, module, revoke_ingress, group_id, 'in') + if revoke_egress: + revoke(client, module, revoke_egress, group_id, 'out') + return bool(revoke_ingress or revoke_egress) + + +def revoke(client, module, ip_permissions, group_id, rule_type): + if not module.check_mode: + try: + if rule_type == 'in': + client.revoke_security_group_ingress( + aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + elif rule_type == 'out': + client.revoke_security_group_egress( + aws_retry=True, + GroupId=group_id, IpPermissions=ip_permissions) + except (BotoCoreError, ClientError) as e: + rules = 'ingress rules' if rule_type == 'in' else 'egress rules' + module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions)) + + +def add_new_permissions(client, module, new_ingress, new_egress, group_id): + if new_ingress: + authorize(client, module, new_ingress, group_id, 'in') + if new_egress: + authorize(client, module, new_egress, group_id, 'out') + return bool(new_ingress or new_egress) + + +def authorize(client, module, ip_permissions, group_id, rule_type): + if not module.check_mode: + try: + if rule_type == 'in': + client.authorize_security_group_ingress( + aws_retry=True, + GroupId=group_id, IpPermissions=ip_permissions) + elif rule_type == 'out': + client.authorize_security_group_egress( + aws_retry=True, + GroupId=group_id, IpPermissions=ip_permissions) + except (BotoCoreError, ClientError) as e: + rules = 'ingress rules' if rule_type == 'in' else 'egress rules' + module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions)) + + +def validate_ip(module, cidr_ip): + split_addr = cidr_ip.split('/') + if len(split_addr) == 2: + # this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set + # Get the network bits if IPv4, and validate if IPv6. + try: + ip = to_subnet(split_addr[0], split_addr[1]) + if ip != cidr_ip: + module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, " + "check the network mask and make sure that only network bits are set: {1}.".format( + cidr_ip, ip)) + except ValueError: + # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here + try: + isinstance(ip_network(to_text(cidr_ip)), IPv6Network) + ip = cidr_ip + except ValueError: + # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError + # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits + ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1] + if ip6 != cidr_ip: + module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. 
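+                # --- Editor's illustrative sketch (not part of the module source) ---
+                # to_subnet() masks host bits from an IPv4 CIDR, which is what
+                # triggers the warning in validate_ip() here, e.g.:
+                #
+                #   to_subnet('10.0.0.5', 24)  # -> '10.0.0.0/24'
+                # --- end sketch ---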
To get rid of this warning, " + "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6)) + return ip6 + return ip + return cidr_ip + + +def update_tags(client, module, group_id, current_tags, tags, purge_tags): + tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags) + + if not module.check_mode: + if tags_to_delete: + try: + client.delete_tags(aws_retry=True, Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete]) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete)) + + # Add/update tags + if tags_need_modify: + try: + client.create_tags(aws_retry=True, Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify)) + + return bool(tags_need_modify or tags_to_delete) + + +def update_rule_descriptions(module, client, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list): + changed = False + ingress_needs_desc_update = [] + egress_needs_desc_update = [] + + for present_rule in present_egress: + needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description] + for r in needs_update: + named_tuple_egress_list.remove(r) + egress_needs_desc_update.extend(needs_update) + for present_rule in present_ingress: + needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description] + for r in needs_update: + named_tuple_ingress_list.remove(r) + ingress_needs_desc_update.extend(needs_update) + + if ingress_needs_desc_update: + update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update)) + changed |= True + if egress_needs_desc_update: + update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update)) + changed |= True + return changed + + +def create_security_group(client, module, name, description, vpc_id): + if not module.check_mode: + params = dict(GroupName=name, Description=description) + if vpc_id: + params['VpcId'] = vpc_id + try: + group = client.create_security_group(aws_retry=True, **params) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to create security group") + # When a group is created, an egress_rule ALLOW ALL + # to 0.0.0.0/0 is added automatically but it's not + # reflected in the object returned by the AWS API + # call. 
We re-read the group for getting an updated object
+        # amazon sometimes takes a couple seconds to update the security group so wait till it exists
+        while True:
+            sleep(3)
+            group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+            if group.get('VpcId') and not group.get('IpPermissionsEgress'):
+                pass
+            else:
+                break
+        return group
+    return None
+
+
+def wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
+    group_id = group['GroupId']
+    tries = 6
+
+    def await_rules(group, desired_rules, purge, rule_key):
+        for i in range(tries):
+            current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
+            if purge and len(current_rules ^ set(desired_rules)) == 0:
+                return group
+            elif purge:
+                conflicts = current_rules ^ set(desired_rules)
+                # For cases where set comparison is equivalent, but invalid port/proto exist
+                for a, b in itertools.combinations(conflicts, 2):
+                    if rule_cmp(a, b):
+                        conflicts.discard(a)
+                        conflicts.discard(b)
+                if not len(conflicts):
+                    return group
+            elif current_rules.issuperset(desired_rules) and not purge:
+                return group
+            sleep(10)
+            group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0]
+        module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
+        return group
+
+    group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0]
+    if 'VpcId' in group and module.params.get('rules_egress') is not None:
+        group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
+    return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
+
+
+def group_exists(client, module, vpc_id, group_id, name):
+    params = {'Filters': []}
+    if group_id:
+        params['GroupIds'] = [group_id]
+    if name:
+        # Add name to filters rather than params['GroupNames']
+        # because params['GroupNames'] only checks the default vpc if no vpc is provided
+        params['Filters'].append({'Name': 'group-name', 'Values': [name]})
+    if vpc_id:
+        params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
+    # Don't filter by description to maintain backwards compatibility
+
+    try:
+        security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
+        all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
+    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Error in describe_security_groups")
+
+    if security_groups:
+        groups = dict((group['GroupId'], group) for group in all_groups)
+        groups.update(dict((group['GroupName'], group) for group in all_groups))
+        if vpc_id:
+            vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id)
+            groups.update(vpc_wins)
+        # maintain backwards compatibility by using the last matching group
+        return security_groups[-1], groups
+    return None, {}
+
+
+def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress):
+    if not hasattr(client, "update_security_group_rule_descriptions_egress"):
+        # the parentheses matter here: without them, the egress rules are
+        # skipped whenever ingress rules are supplied
+        all_rules = (rules if rules else []) + (rules_egress if rules_egress else [])
+        if any('rule_desc' in rule for rule in all_rules):
+            module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.")
+
+
+def get_diff_final_resource(client, module, security_group):
+    def get_account_id(security_group, module):
+        try:
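+        # --- Editor's illustrative sketch (not part of the module source) ---
+        # compare_aws_tags(), used by update_tags() above and get_final_tags()
+        # below, returns (tags_to_set, tag_keys_to_delete):
+        #
+        #   compare_aws_tags({'Name': 'a', 'Env': 'x'}, {'Name': 'b'}, purge_tags=True)
+        #   # -> ({'Name': 'b'}, ['Env'])
+        # --- end sketch ---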
+ owner_id = security_group.get('owner_id', current_account_id) + except (BotoCoreError, ClientError) as e: + owner_id = "Unable to determine owner_id: {0}".format(to_text(e)) + return owner_id + + def get_final_tags(security_group_tags, specified_tags, purge_tags): + if specified_tags is None: + return security_group_tags + tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags) + end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete) + end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete)) + end_result_tags.update(tags_need_modify) + return end_result_tags + + def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules): + if specified_rules is None: + return security_group_rules + if purge_rules: + final_rules = [] + else: + final_rules = list(security_group_rules) + specified_rules = flatten_nested_targets(module, deepcopy(specified_rules)) + for rule in specified_rules: + format_rule = { + 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'), + 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': [] + } + if rule.get('proto', 'tcp') in ('all', '-1', -1): + format_rule['ip_protocol'] = '-1' + format_rule.pop('from_port') + format_rule.pop('to_port') + elif rule.get('ports'): + if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)): + rule['ports'] = [rule['ports']] + for port in rule.get('ports'): + if isinstance(port, string_types) and '-' in port: + format_rule['from_port'], format_rule['to_port'] = port.split('-') + else: + format_rule['from_port'] = format_rule['to_port'] = port + elif rule.get('from_port') or rule.get('to_port'): + format_rule['from_port'] = rule.get('from_port', rule.get('to_port')) + format_rule['to_port'] = rule.get('to_port', rule.get('from_port')) + for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'): + if rule.get(source_type): + rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type) + if rule.get('rule_desc'): + format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}] + else: + if not isinstance(rule[source_type], list): + rule[source_type] = [rule[source_type]] + format_rule[rule_key] = [{source_type: target} for target in rule[source_type]] + if rule.get('group_id') or rule.get('group_name'): + rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0]) + format_rule['user_id_group_pairs'] = [{ + 'description': rule_sg.get('description', rule_sg.get('group_desc')), + 'group_id': rule_sg.get('group_id', rule.get('group_id')), + 'group_name': rule_sg.get('group_name', rule.get('group_name')), + 'peering_status': rule_sg.get('peering_status'), + 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)), + 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']), + 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id') + }] + for k, v in list(format_rule['user_id_group_pairs'][0].items()): + if v is None: + format_rule['user_id_group_pairs'][0].pop(k) + final_rules.append(format_rule) + # Order final rules consistently + final_rules.sort(key=get_ip_permissions_sort_key) + return final_rules + security_group_ingress = security_group.get('ip_permissions', []) + specified_ingress = 
module.params['rules'] + purge_ingress = module.params['purge_rules'] + security_group_egress = security_group.get('ip_permissions_egress', []) + specified_egress = module.params['rules_egress'] + purge_egress = module.params['purge_rules_egress'] + return { + 'description': module.params['description'], + 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'), + 'group_name': security_group.get('group_name', module.params['name']), + 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress), + 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress), + 'owner_id': get_account_id(security_group, module), + 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']), + 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])} + + +def flatten_nested_targets(module, rules): + def _flatten(targets): + for target in targets: + if isinstance(target, list): + for t in _flatten(target): + yield t + elif isinstance(target, string_types): + yield target + + if rules is not None: + for rule in rules: + target_list_type = None + if isinstance(rule.get('cidr_ip'), list): + target_list_type = 'cidr_ip' + elif isinstance(rule.get('cidr_ipv6'), list): + target_list_type = 'cidr_ipv6' + if target_list_type is not None: + rule[target_list_type] = list(_flatten(rule[target_list_type])) + return rules + + +def get_rule_sort_key(dicts): + if dicts.get('cidr_ip'): + return dicts.get('cidr_ip') + elif dicts.get('cidr_ipv6'): + return dicts.get('cidr_ipv6') + elif dicts.get('prefix_list_id'): + return dicts.get('prefix_list_id') + elif dicts.get('group_id'): + return dicts.get('group_id') + return None + + +def get_ip_permissions_sort_key(rule): + if rule.get('ip_ranges'): + rule.get('ip_ranges').sort(key=get_rule_sort_key) + return rule.get('ip_ranges')[0]['cidr_ip'] + elif rule.get('ipv6_ranges'): + rule.get('ipv6_ranges').sort(key=get_rule_sort_key) + return rule.get('ipv6_ranges')[0]['cidr_ipv6'] + elif rule.get('prefix_list_ids'): + rule.get('prefix_list_ids').sort(key=get_rule_sort_key) + return rule.get('prefix_list_ids')[0]['prefix_list_id'] + elif rule.get('user_id_group_pairs'): + rule.get('user_id_group_pairs').sort(key=get_rule_sort_key) + return rule.get('user_id_group_pairs')[0]['group_id'] + return None + + +def main(): + argument_spec = dict( + name=dict(), + group_id=dict(), + description=dict(), + vpc_id=dict(), + rules=dict(type='list', elements='dict'), + rules_egress=dict(type='list', elements='dict'), + state=dict(default='present', type='str', choices=['present', 'absent']), + purge_rules=dict(default=True, required=False, type='bool'), + purge_rules_egress=dict(default=True, required=False, type='bool'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, required=False, type='bool') + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[['name', 'group_id']], + required_if=[['state', 'present', ['name']]], + ) + + name = module.params['name'] + group_id = module.params['group_id'] + description = module.params['description'] + vpc_id = module.params['vpc_id'] + rules = flatten_nested_targets(module, deepcopy(module.params['rules'])) + rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress'])) + rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules))) + rules_egress = 
deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress))) + state = module.params.get('state') + purge_rules = module.params['purge_rules'] + purge_rules_egress = module.params['purge_rules_egress'] + tags = module.params['tags'] + purge_tags = module.params['purge_tags'] + + if state == 'present' and not description: + module.fail_json(msg='Must provide description when state is present.') + + changed = False + client = module.client('ec2', AWSRetry.jittered_backoff()) + + verify_rules_with_descriptions_permitted(client, module, rules, rules_egress) + group, groups = group_exists(client, module, vpc_id, group_id, name) + group_created_new = not bool(group) + + global current_account_id + current_account_id = get_aws_account_id(module) + + before = {} + after = {} + + # Ensure requested group is absent + if state == 'absent': + if group: + # found a match, delete it + before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) + before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) + try: + if not module.check_mode: + client.delete_security_group(aws_retry=True, GroupId=group['GroupId']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group) + else: + group = None + changed = True + else: + # no match found, no changes required + pass + + # Ensure requested group is present + elif state == 'present': + if group: + # existing group + before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) + before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) + if group['Description'] != description: + module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting " + "and re-creating the security group. 
Try using state=absent to delete, then rerunning this task.")
+        else:
+            # no match found, create it
+            group = create_security_group(client, module, name, description, vpc_id)
+            changed = True
+
+    if tags is not None and group is not None:
+        current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+        changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)
+
+    if group:
+        named_tuple_ingress_list = []
+        named_tuple_egress_list = []
+        current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
+        current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])
+
+        for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
+                                                            (rules_egress, 'out', named_tuple_egress_list)]:
+            if new_rules is None:
+                continue
+            for rule in new_rules:
+                target_type, target, target_group_created = get_target_from_rule(
+                    module, client, rule, name, group, groups, vpc_id)
+                changed |= target_group_created
+
+                if rule.get('proto', 'tcp') in ('all', '-1', -1):
+                    rule['proto'] = '-1'
+                    rule['from_port'] = None
+                    rule['to_port'] = None
+                try:
+                    int(rule.get('proto', 'tcp'))
+                    rule['proto'] = to_text(rule.get('proto', 'tcp'))
+                    rule['from_port'] = None
+                    rule['to_port'] = None
+                except ValueError:
+                    # rule does not use numeric protocol spec
+                    pass
+
+                named_tuple_rule_list.append(
+                    Rule(
+                        port_range=(rule['from_port'], rule['to_port']),
+                        protocol=to_text(rule.get('proto', 'tcp')),
+                        target=target, target_type=target_type,
+                        description=rule.get('rule_desc'),
+                    )
+                )
+
+        if module.params.get('rules_egress') is None and 'VpcId' in group:
+            # when no egress rules are specified and we're in a VPC,
+            # we add in a default allow all out rule, which was the
+            # default behavior before egress rules were added
+            rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+            if rule in current_egress:
+                named_tuple_egress_list.append(rule)
+            if rule not in current_egress:
+                current_egress.append(rule)
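+            # Editor's note (illustrative, not upstream code): this default
+            # egress Rule corresponds roughly to the boto3 permission that
+            # to_permission() would produce:
+            #
+            #     {'IpProtocol': '-1',
+            #      'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}
+            #
+            # i.e. the implicit "allow all outbound" rule that EC2 attaches
+            # to newly created VPC security groups.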
+        # Rules that should be present on the group after this run: the
+        # requested rules plus whatever already exists on the group
+        present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
+        present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))
+
+        if purge_rules:
+            revoke_ingress = []
+            for p in present_ingress:
+                if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]):
+                    revoke_ingress.append(to_permission(p))
+        else:
+            revoke_ingress = []
+        if purge_rules_egress and module.params.get('rules_egress') is not None:
+            if module.params.get('rules_egress') == []:
+                revoke_egress = [
+                    to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
+                    if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+                ]
+            else:
+                revoke_egress = []
+                for p in present_egress:
+                    if not any([rule_cmp(p, b) for b in named_tuple_egress_list]):
+                        revoke_egress.append(to_permission(p))
+        else:
+            revoke_egress = []
+
+        # named_tuple_ingress_list and named_tuple_egress_list get updated by
+        # method update_rule_descriptions, deep copy these two lists to new
+        # variables for the record of the 'desired' ingress and egress sg permissions
+        desired_ingress = deepcopy(named_tuple_ingress_list)
+        desired_egress = deepcopy(named_tuple_egress_list)
+
+        changed |= update_rule_descriptions(module, client, group['GroupId'], present_ingress,
+                                            named_tuple_ingress_list, present_egress, named_tuple_egress_list)
+
+        # Revoke old rules
+        changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])
+
+        new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
+        new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
+        # Authorize new rules
+        changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])
+
+        if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
+            # A new group with no rules provided is already being awaited.
+            # When it is created we wait for the default egress rule to be added by AWS
+            security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+        elif changed and not module.check_mode:
+            # keep polling until current security group rules match the desired ingress and egress rules
+            security_group = wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
+        else:
+            security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+        security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
+        security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))
+
+    else:
+        security_group = {'group_id': None}
+
+    if module._diff:
+        if module.params['state'] == 'present':
+            after = get_diff_final_resource(client, module, security_group)
+            if before.get('ip_permissions'):
+                before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
+
+        security_group['diff'] = [{'before': before, 'after': after}]
+
+    module.exit_json(changed=changed, **security_group)
+
+
+if __name__ == '__main__':
+    main()
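+# --- Editor's illustration (not part of the upstream module) -----------------
+# get_ip_permissions_sort_key() keys each permission dict on its first target,
+# which is what makes the diff-mode rule ordering deterministic. A rough
+# sketch with assumed data:
+#
+#     perms = [
+#         {'ip_ranges': [{'cidr_ip': '172.16.0.0/12'}, {'cidr_ip': '10.0.0.0/8'}]},
+#         {'user_id_group_pairs': [{'group_id': 'sg-0123456789abcdef0'}]},
+#     ]
+#     perms.sort(key=get_ip_permissions_sort_key)
+#     # each ip_ranges list is sorted in place first, so the first permission
+#     # is keyed on '10.0.0.0/8' and sorts before the 'sg-...' group pair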
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_facts.py
new file mode 100644
index 00000000..228b82d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_facts.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_group_info
+version_added: 1.0.0
+short_description: Gather information about ec2 security groups in AWS.
+description:
+    - Gather information about ec2 security groups in AWS.
+    - This module was called C(amazon.aws.ec2_group_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author:
+- Henrique Rodrigues (@Sodki)
+options:
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+        U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for
+        possible filters. Filter names and values are case sensitive. You can also use underscores (_)
+        instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
+    required: false
+    default: {}
+    type: dict
+notes:
+  - By default, the module will return all security groups. To limit results use the appropriate filters.
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all security groups
+- amazon.aws.ec2_group_info:
+
+# Gather information about all security groups in a specific VPC
+- amazon.aws.ec2_group_info:
+    filters:
+      vpc-id: vpc-12345678
+
+# Gather information about a security group
+- amazon.aws.ec2_group_info:
+    filters:
+      group-name: example-1
+
+# Gather information about a security group by id
+- amazon.aws.ec2_group_info:
+    filters:
+      group-id: sg-12345678
+
+# Gather information about a security group with multiple filters, also mixing the use of underscores as filter keys
+- amazon.aws.ec2_group_info:
+    filters:
+      group_id: sg-12345678
+      vpc-id: vpc-12345678
+
+# Gather information about various security groups
+- amazon.aws.ec2_group_info:
+    filters:
+      group-name:
+        - example-1
+        - example-2
+        - example-3
+
+# Gather information about any security group with a tag key Name and value Example.
+# The quotes around 'tag:name' are important because of the colon in the value
+- amazon.aws.ec2_group_info:
+    filters:
+      "tag:Name": Example
+'''
+
+RETURN = '''
+security_groups:
+    description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
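+    # Editor's note: the 'sample' value for this field is empty upstream. An
+    # assumed, abbreviated element for illustration (keys are snake_cased by
+    # the module):
+    #   {"group_id": "sg-12345678", "group_name": "example-1",
+    #    "ip_permissions": [], "ip_permissions_egress": [],
+    #    "owner_id": "123456789012", "tags": {}, "vpc_id": "vpc-12345678"}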
+ type: list + returned: always + sample: +''' + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def main(): + argument_spec = dict( + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'ec2_group_facts': + module.deprecate("The 'ec2_group_facts' module has been renamed to 'ec2_group_info'", date='2021-12-01', collection_name='amazon.aws') + + connection = module.client('ec2', AWSRetry.jittered_backoff()) + + # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags + filters = module.params.get("filters") + sanitized_filters = dict() + + for key in filters: + if key.startswith("tag:"): + sanitized_filters[key] = filters[key] + else: + sanitized_filters[key.replace("_", "-")] = filters[key] + + try: + security_groups = connection.describe_security_groups( + aws_retry=True, + Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to describe security groups') + + snaked_security_groups = [] + for security_group in security_groups['SecurityGroups']: + # Modify boto3 tags list to be ansible friendly dict + # but don't camel case tags + security_group = camel_dict_to_snake_dict(security_group) + security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', {}), tag_name_key_name='key', tag_value_key_name='value') + snaked_security_groups.append(security_group) + + module.exit_json(security_groups=snaked_security_groups) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_info.py new file mode 100644 index 00000000..228b82d9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_info.py @@ -0,0 +1,148 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_group_info +version_added: 1.0.0 +short_description: Gather information about ec2 security groups in AWS. +description: + - Gather information about ec2 security groups in AWS. + - This module was called C(amazon.aws.ec2_group_facts) before Ansible 2.9. The usage did not change. +requirements: [ boto3 ] +author: +- Henrique Rodrigues (@Sodki) +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for + possible filters. Filter names and values are case sensitive. You can also use underscores (_) + instead of dashes (-) in the filter keys, which will take precedence in case of conflict. + required: false + default: {} + type: dict +notes: + - By default, the module will return all security groups. 
To limit results use the appropriate filters.
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all security groups
+- amazon.aws.ec2_group_info:
+
+# Gather information about all security groups in a specific VPC
+- amazon.aws.ec2_group_info:
+    filters:
+      vpc-id: vpc-12345678
+
+# Gather information about a security group
+- amazon.aws.ec2_group_info:
+    filters:
+      group-name: example-1
+
+# Gather information about a security group by id
+- amazon.aws.ec2_group_info:
+    filters:
+      group-id: sg-12345678
+
+# Gather information about a security group with multiple filters, also mixing the use of underscores as filter keys
+- amazon.aws.ec2_group_info:
+    filters:
+      group_id: sg-12345678
+      vpc-id: vpc-12345678
+
+# Gather information about various security groups
+- amazon.aws.ec2_group_info:
+    filters:
+      group-name:
+        - example-1
+        - example-2
+        - example-3
+
+# Gather information about any security group with a tag key Name and value Example.
+# The quotes around 'tag:name' are important because of the colon in the value
+- amazon.aws.ec2_group_info:
+    filters:
+      "tag:Name": Example
+'''
+
+RETURN = '''
+security_groups:
+    description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
+    type: list
+    returned: always
+    sample:
+'''
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def main():
+    argument_spec = dict(
+        filters=dict(default={}, type='dict')
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+    if module._name == 'ec2_group_facts':
+        module.deprecate("The 'ec2_group_facts' module has been renamed to 'ec2_group_info'", date='2021-12-01', collection_name='amazon.aws')
+
+    connection = module.client('ec2', AWSRetry.jittered_backoff())
+
+    # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
+    filters = module.params.get("filters")
+    sanitized_filters = dict()
+
+    for key in filters:
+        if key.startswith("tag:"):
+            sanitized_filters[key] = filters[key]
+        else:
+            sanitized_filters[key.replace("_", "-")] = filters[key]
+
+    try:
+        security_groups = connection.describe_security_groups(
+            aws_retry=True,
+            Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+        )
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg='Failed to describe security groups')
+
+    snaked_security_groups = []
+    for security_group in security_groups['SecurityGroups']:
+        # Modify boto3 tags list to be ansible friendly dict
+        # but don't camel case tags
+        security_group = camel_dict_to_snake_dict(security_group)
+        security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', {}), tag_name_key_name='key', tag_value_key_name='value')
+        snaked_security_groups.append(security_group)
+
+    module.exit_json(security_groups=snaked_security_groups)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_key.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_key.py
new file mode 100644
index 00000000..815130f9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_key.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_key
+version_added: 1.0.0
+short_description: Create or delete an EC2 key pair
+description:
+    - Create or delete an EC2 key pair.
+options:
+  name:
+    description:
+      - Name of the key pair.
+    required: true
+    type: str
+  key_material:
+    description:
+      - Public key material.
+    required: false
+    type: str
+  force:
+    description:
+      - Force overwrite of an already existing key pair if the key has changed.
+    required: false
+    default: true
+    type: bool
+  state:
+    description:
+      - Create or delete the keypair.
+    required: false
+    choices: [ present, absent ]
+    default: 'present'
+    type: str
+  wait:
+    description:
+      - This option has no effect since version 2.5 and will be removed after 2022-06-01.
+    type: bool
+  wait_timeout:
+    description:
+      - This option has no effect since version 2.5 and will be removed after 2022-06-01.
+    type: int
+    required: false
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ boto3 ]
+author:
+  - "Vincent Viallet (@zbal)"
+  - "Prasad Katti (@prasadkatti)"
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
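+
+# Editor's sketch (commented out; not part of the upstream examples): the
+# private key is only returned when AWS generates the key pair, so it can be
+# captured with register and written to disk. Names and paths are assumptions.
+#
+# - name: create a key pair and remember the result
+#   amazon.aws.ec2_key:
+#     name: my_keypair
+#   register: key_result
+#
+# - name: persist the generated private key (first run only)
+#   ansible.builtin.copy:
+#     content: "{{ key_result.key.private_key }}"
+#     dest: ./my_keypair.pem
+#     mode: '0600'
+#   when: key_result.key.private_key is defined
+#   delegate_to: localhost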
+ +- name: create a new ec2 key pair, returns generated private key + amazon.aws.ec2_key: + name: my_keypair + +- name: create key pair using provided key_material + amazon.aws.ec2_key: + name: my_keypair + key_material: 'ssh-rsa AAAAxyz...== me@example.com' + +- name: create key pair using key_material obtained using 'file' lookup plugin + amazon.aws.ec2_key: + name: my_keypair + key_material: "{{ lookup('file', '/path/to/public_key/id_rsa.pub') }}" + +# try creating a key pair with the name of an already existing keypair +# but don't overwrite it even if the key is different (force=false) +- name: try creating a key pair with name of an already existing keypair + amazon.aws.ec2_key: + name: my_existing_keypair + key_material: 'ssh-rsa AAAAxyz...== me@example.com' + force: false + +- name: remove key pair by name + amazon.aws.ec2_key: + name: my_keypair + state: absent +''' + +RETURN = ''' +changed: + description: whether a keypair was created/deleted + returned: always + type: bool + sample: true +msg: + description: short message describing the action taken + returned: always + type: str + sample: key pair created +key: + description: details of the keypair (this is set to null when state is absent) + returned: always + type: complex + contains: + fingerprint: + description: fingerprint of the key + returned: when state is present + type: str + sample: 'b0:22:49:61:d9:44:9d:0c:7e:ac:8a:32:93:21:6c:e8:fb:59:62:43' + name: + description: name of the keypair + returned: when state is present + type: str + sample: my_keypair + private_key: + description: private key of a newly created keypair + returned: when a new keypair is created by AWS (key_material is not provided) + type: str + sample: '-----BEGIN RSA PRIVATE KEY----- + MIIEowIBAAKC... + -----END RSA PRIVATE KEY-----' +''' + +import uuid + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils._text import to_bytes + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.core import is_boto3_error_code +from ..module_utils.ec2 import AWSRetry + + +def extract_key_data(key): + + data = { + 'name': key['KeyName'], + 'fingerprint': key['KeyFingerprint'] + } + if 'KeyMaterial' in key: + data['private_key'] = key['KeyMaterial'] + return data + + +def get_key_fingerprint(module, ec2_client, key_material): + ''' + EC2's fingerprints are non-trivial to generate, so push this key + to a temporary name and make ec2 calculate the fingerprint for us. 
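+
+    A rough plain-boto3 equivalent of the trick below (editor's sketch; the
+    client variable and key name are assumptions, not upstream code):
+
+        temp = ec2_client.import_key_pair(KeyName='ansible-temp',
+                                          PublicKeyMaterial=b'ssh-rsa AAAA...')
+        fingerprint = temp['KeyFingerprint']
+        ec2_client.delete_key_pair(KeyName='ansible-temp')
+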
+ http://blog.jbrowne.com/?p=23 + https://forums.aws.amazon.com/thread.jspa?messageID=352828 + ''' + + # find an unused name + name_in_use = True + while name_in_use: + random_name = "ansible-" + str(uuid.uuid4()) + name_in_use = find_key_pair(module, ec2_client, random_name) + + temp_key = import_key_pair(module, ec2_client, random_name, key_material) + delete_key_pair(module, ec2_client, random_name, finish_task=False) + return temp_key['KeyFingerprint'] + + +def find_key_pair(module, ec2_client, name): + + try: + key = ec2_client.describe_key_pairs(aws_retry=True, KeyNames=[name])['KeyPairs'][0] + except is_boto3_error_code('InvalidKeyPair.NotFound'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err: + module.fail_json_aws(err, msg="error finding keypair") + except IndexError: + key = None + return key + + +def create_key_pair(module, ec2_client, name, key_material, force): + + key = find_key_pair(module, ec2_client, name) + if key: + if key_material and force: + if not module.check_mode: + new_fingerprint = get_key_fingerprint(module, ec2_client, key_material) + if key['KeyFingerprint'] != new_fingerprint: + delete_key_pair(module, ec2_client, name, finish_task=False) + key = import_key_pair(module, ec2_client, name, key_material) + key_data = extract_key_data(key) + module.exit_json(changed=True, key=key_data, msg="key pair updated") + else: + # Assume a change will be made in check mode since a comparison can't be done + module.exit_json(changed=True, key=extract_key_data(key), msg="key pair updated") + key_data = extract_key_data(key) + module.exit_json(changed=False, key=key_data, msg="key pair already exists") + else: + # key doesn't exist, create it now + key_data = None + if not module.check_mode: + if key_material: + key = import_key_pair(module, ec2_client, name, key_material) + else: + try: + key = ec2_client.create_key_pair(aws_retry=True, KeyName=name) + except botocore.exceptions.ClientError as err: + module.fail_json_aws(err, msg="error creating key") + key_data = extract_key_data(key) + module.exit_json(changed=True, key=key_data, msg="key pair created") + + +def import_key_pair(module, ec2_client, name, key_material): + + try: + key = ec2_client.import_key_pair(aws_retry=True, KeyName=name, PublicKeyMaterial=to_bytes(key_material)) + except botocore.exceptions.ClientError as err: + module.fail_json_aws(err, msg="error importing key") + return key + + +def delete_key_pair(module, ec2_client, name, finish_task=True): + + key = find_key_pair(module, ec2_client, name) + if key: + if not module.check_mode: + try: + ec2_client.delete_key_pair(aws_retry=True, KeyName=name) + except botocore.exceptions.ClientError as err: + module.fail_json_aws(err, msg="error deleting key") + if not finish_task: + return + module.exit_json(changed=True, key=None, msg="key deleted") + module.exit_json(key=None, msg="key did not exist") + + +def main(): + + argument_spec = dict( + name=dict(required=True), + key_material=dict(), + force=dict(type='bool', default=True), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='amazon.aws'), + wait_timeout=dict(type='int', removed_at_date='2022-06-01', removed_from_collection='amazon.aws') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + name = module.params['name'] + state = 
module.params.get('state') + key_material = module.params.get('key_material') + force = module.params.get('force') + + if state == 'absent': + delete_key_pair(module, ec2_client, name) + elif state == 'present': + create_key_pair(module, ec2_client, name, key_material, force) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py new file mode 100644 index 00000000..e871f2d9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py @@ -0,0 +1,563 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_metadata_facts +version_added: 1.0.0 +short_description: Gathers facts (instance metadata) about remote hosts within ec2 +author: + - Silviu Dicu (@silviud) + - Vinay Dandekar (@roadmapper) +description: + - This module fetches data from the instance metadata endpoint in ec2 as per + U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). + - The module must be called from within the EC2 instance itself. +notes: + - Parameters to filter on ec2_metadata_facts may be added later. +''' + +EXAMPLES = ''' +# Gather EC2 metadata facts +- amazon.aws.ec2_metadata_facts: + +- debug: + msg: "This instance is a t1.micro" + when: ansible_ec2_instance_type == "t1.micro" +''' + +RETURN = ''' +ansible_facts: + description: Dictionary of new facts representing discovered properties of the EC2 instance. + returned: changed + type: complex + contains: + ansible_ec2_ami_id: + description: The AMI ID used to launch the instance. + type: str + sample: "ami-XXXXXXXX" + ansible_ec2_ami_launch_index: + description: + - If you started more than one instance at the same time, this value indicates the order in which the instance was launched. + - The value of the first instance launched is 0. + type: str + sample: "0" + ansible_ec2_ami_manifest_path: + description: + - The path to the AMI manifest file in Amazon S3. + - If you used an Amazon EBS-backed AMI to launch the instance, the returned result is unknown. + type: str + sample: "(unknown)" + ansible_ec2_ancestor_ami_ids: + description: + - The AMI IDs of any instances that were rebundled to create this AMI. + - This value will only exist if the AMI manifest file contained an ancestor-amis key. + type: str + sample: "(unknown)" + ansible_ec2_block_device_mapping_ami: + description: The virtual device that contains the root/boot file system. + type: str + sample: "/dev/sda1" + ansible_ec2_block_device_mapping_ebsN: + description: + - The virtual devices associated with Amazon EBS volumes, if any are present. + - Amazon EBS volumes are only available in metadata if they were present at launch time or when the instance was last started. + - The N indicates the index of the Amazon EBS volume (such as ebs1 or ebs2). + type: str + sample: "/dev/xvdb" + ansible_ec2_block_device_mapping_ephemeralN: + description: The virtual devices associated with ephemeral devices, if any are present. The N indicates the index of the ephemeral volume. 
+ type: str + sample: "/dev/xvdc" + ansible_ec2_block_device_mapping_root: + description: + - The virtual devices or partitions associated with the root devices, or partitions on the virtual device, + where the root (/ or C) file system is associated with the given instance. + type: str + sample: "/dev/sda1" + ansible_ec2_block_device_mapping_swap: + description: The virtual devices associated with swap. Not always present. + type: str + sample: "/dev/sda2" + ansible_ec2_fws_instance_monitoring: + description: "Value showing whether the customer has enabled detailed one-minute monitoring in CloudWatch." + type: str + sample: "enabled" + ansible_ec2_hostname: + description: + - The private IPv4 DNS hostname of the instance. + - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0). + type: str + sample: "ip-10-0-0-1.ec2.internal" + ansible_ec2_iam_info: + description: + - If there is an IAM role associated with the instance, contains information about the last time the instance profile was updated, + including the instance's LastUpdated date, InstanceProfileArn, and InstanceProfileId. Otherwise, not present. + type: complex + sample: "" + contains: + LastUpdated: + description: The last time which InstanceProfile is associated with the Instance changed. + type: str + InstanceProfileArn: + description: The ARN of the InstanceProfile associated with the Instance. + type: str + InstanceProfileId: + description: The Id of the InstanceProfile associated with the Instance. + type: str + ansible_ec2_iam_info_instanceprofilearn: + description: The IAM instance profile ARN. + type: str + sample: "arn:aws:iam::<account id>:instance-profile/<role name>" + ansible_ec2_iam_info_instanceprofileid: + description: IAM instance profile ID. + type: str + sample: "" + ansible_ec2_iam_info_lastupdated: + description: IAM info last updated time. + type: str + sample: "2017-05-12T02:42:27Z" + ansible_ec2_iam_instance_profile_role: + description: IAM instance role. + type: str + sample: "role_name" + ansible_ec2_iam_security_credentials_<role name>: + description: + - If there is an IAM role associated with the instance, role-name is the name of the role, + and role-name contains the temporary security credentials associated with the role. Otherwise, not present. + type: str + sample: "" + ansible_ec2_iam_security_credentials_<role name>_accesskeyid: + description: IAM role access key ID. + type: str + sample: "" + ansible_ec2_iam_security_credentials_<role name>_code: + description: IAM code. + type: str + sample: "Success" + ansible_ec2_iam_security_credentials_<role name>_expiration: + description: IAM role credentials expiration time. + type: str + sample: "2017-05-12T09:11:41Z" + ansible_ec2_iam_security_credentials_<role name>_lastupdated: + description: IAM role last updated time. + type: str + sample: "2017-05-12T02:40:44Z" + ansible_ec2_iam_security_credentials_<role name>_secretaccesskey: + description: IAM role secret access key. + type: str + sample: "" + ansible_ec2_iam_security_credentials_<role name>_token: + description: IAM role token. + type: str + sample: "" + ansible_ec2_iam_security_credentials_<role name>_type: + description: IAM role type. + type: str + sample: "AWS-HMAC" + ansible_ec2_instance_action: + description: Notifies the instance that it should reboot in preparation for bundling. + type: str + sample: "none" + ansible_ec2_instance_id: + description: The ID of this instance. 
+ type: str + sample: "i-XXXXXXXXXXXXXXXXX" + ansible_ec2_instance_identity_document: + description: JSON containing instance attributes, such as instance-id, private IP address, etc. + type: str + sample: "" + ansible_ec2_instance_identity_document_accountid: + description: "" + type: str + sample: "012345678901" + ansible_ec2_instance_identity_document_architecture: + description: Instance system architecture. + type: str + sample: "x86_64" + ansible_ec2_instance_identity_document_availabilityzone: + description: The Availability Zone in which the instance launched. + type: str + sample: "us-east-1a" + ansible_ec2_instance_identity_document_billingproducts: + description: Billing products for this instance. + type: str + sample: "" + ansible_ec2_instance_identity_document_devpayproductcodes: + description: Product codes for the launched AMI. + type: str + sample: "" + ansible_ec2_instance_identity_document_imageid: + description: The AMI ID used to launch the instance. + type: str + sample: "ami-01234567" + ansible_ec2_instance_identity_document_instanceid: + description: The ID of this instance. + type: str + sample: "i-0123456789abcdef0" + ansible_ec2_instance_identity_document_instancetype: + description: The type of instance. + type: str + sample: "m4.large" + ansible_ec2_instance_identity_document_kernelid: + description: The ID of the kernel launched with this instance, if applicable. + type: str + sample: "" + ansible_ec2_instance_identity_document_pendingtime: + description: The instance pending time. + type: str + sample: "2017-05-11T20:51:20Z" + ansible_ec2_instance_identity_document_privateip: + description: + - The private IPv4 address of the instance. + - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0). + type: str + sample: "10.0.0.1" + ansible_ec2_instance_identity_document_ramdiskid: + description: The ID of the RAM disk specified at launch time, if applicable. + type: str + sample: "" + ansible_ec2_instance_identity_document_region: + description: The Region in which the instance launched. + type: str + sample: "us-east-1" + ansible_ec2_instance_identity_document_version: + description: Identity document version. + type: str + sample: "2010-08-31" + ansible_ec2_instance_identity_pkcs7: + description: Used to verify the document's authenticity and content against the signature. + type: str + sample: "" + ansible_ec2_instance_identity_rsa2048: + description: Used to verify the document's authenticity and content against the signature. + type: str + sample: "" + ansible_ec2_instance_identity_signature: + description: Data that can be used by other parties to verify its origin and authenticity. + type: str + sample: "" + ansible_ec2_instance_life_cycle: + description: The purchasing option of the instance. + type: str + sample: "on-demand" + ansible_ec2_instance_type: + description: The type of the instance. + type: str + sample: "m4.large" + ansible_ec2_local_hostname: + description: + - The private IPv4 DNS hostname of the instance. + - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0). + type: str + sample: "ip-10-0-0-1.ec2.internal" + ansible_ec2_local_ipv4: + description: + - The private IPv4 address of the instance. + - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0). 
+ type: str + sample: "10.0.0.1" + ansible_ec2_mac: + description: + - The instance's media access control (MAC) address. + - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0). + type: str + sample: "00:11:22:33:44:55" + ansible_ec2_metrics_vhostmd: + description: Metrics. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_<mac address>_device_number: + description: + - The unique device number associated with that interface. The device number corresponds to the device name; + for example, a device-number of 2 is for the eth2 device. + - This category corresponds to the DeviceIndex and device-index fields that are used by the Amazon EC2 API and the EC2 commands for the AWS CLI. + type: str + sample: "0" + ansible_ec2_network_interfaces_macs_<mac address>_interface_id: + description: The elastic network interface ID. + type: str + sample: "eni-12345678" + ansible_ec2_network_interfaces_macs_<mac address>_ipv4_associations_<ip address>: + description: The private IPv4 addresses that are associated with each public-ip address and assigned to that interface. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_<mac address>_ipv6s: + description: The IPv6 addresses associated with the interface. Returned only for instances launched into a VPC. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_<mac address>_local_hostname: + description: The interface's local hostname. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_<mac address>_local_ipv4s: + description: The private IPv4 addresses associated with the interface. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_<mac address>_mac: + description: The instance's MAC address. + type: str + sample: "00:11:22:33:44:55" + ansible_ec2_network_interfaces_macs_<mac address>_owner_id: + description: + - The ID of the owner of the network interface. + - In multiple-interface environments, an interface can be attached by a third party, such as Elastic Load Balancing. + - Traffic on an interface is always billed to the interface owner. + type: str + sample: "01234567890" + ansible_ec2_network_interfaces_macs_<mac address>_public_hostname: + description: + - The interface's public DNS (IPv4). If the instance is in a VPC, + this category is only returned if the enableDnsHostnames attribute is set to true. + type: str + sample: "ec2-1-2-3-4.compute-1.amazonaws.com" + ansible_ec2_network_interfaces_macs_<mac address>_public_ipv4s: + description: The Elastic IP addresses associated with the interface. There may be multiple IPv4 addresses on an instance. + type: str + sample: "1.2.3.4" + ansible_ec2_network_interfaces_macs_<mac address>_security_group_ids: + description: The IDs of the security groups to which the network interface belongs. Returned only for instances launched into a VPC. + type: str + sample: "sg-01234567,sg-01234568" + ansible_ec2_network_interfaces_macs_<mac address>_security_groups: + description: Security groups to which the network interface belongs. Returned only for instances launched into a VPC. + type: str + sample: "secgroup1,secgroup2" + ansible_ec2_network_interfaces_macs_<mac address>_subnet_id: + description: The ID of the subnet in which the interface resides. Returned only for instances launched into a VPC. 
+ type: str + sample: "subnet-01234567" + ansible_ec2_network_interfaces_macs_<mac address>_subnet_ipv4_cidr_block: + description: The IPv4 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "10.0.1.0/24" + ansible_ec2_network_interfaces_macs_<mac address>_subnet_ipv6_cidr_blocks: + description: The IPv6 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_<mac address>_vpc_id: + description: The ID of the VPC in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "vpc-0123456" + ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv4_cidr_block: + description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "10.0.0.0/16" + ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv4_cidr_blocks: + description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "10.0.0.0/16" + ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv6_cidr_blocks: + description: The IPv6 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "" + ansible_ec2_placement_availability_zone: + description: The Availability Zone in which the instance launched. + type: str + sample: "us-east-1a" + ansible_ec2_placement_region: + description: The Region in which the instance launched. + type: str + sample: "us-east-1" + ansible_ec2_product_codes: + description: Product codes associated with the instance, if any. + type: str + sample: "aw0evgkw8e5c1q413zgy5pjce" + ansible_ec2_profile: + description: EC2 instance hardware profile. + type: str + sample: "default-hvm" + ansible_ec2_public_hostname: + description: + - The instance's public DNS. If the instance is in a VPC, this category is only returned if the enableDnsHostnames attribute is set to true. + type: str + sample: "ec2-1-2-3-4.compute-1.amazonaws.com" + ansible_ec2_public_ipv4: + description: The public IPv4 address. If an Elastic IP address is associated with the instance, the value returned is the Elastic IP address. + type: str + sample: "1.2.3.4" + ansible_ec2_public_key: + description: Public key. Only available if supplied at instance launch time. + type: str + sample: "" + ansible_ec2_ramdisk_id: + description: The ID of the RAM disk specified at launch time, if applicable. + type: str + sample: "" + ansible_ec2_reservation_id: + description: The ID of the reservation. + type: str + sample: "r-0123456789abcdef0" + ansible_ec2_security_groups: + description: + - The names of the security groups applied to the instance. After launch, you can only change the security groups of instances running in a VPC. + - Such changes are reflected here and in network/interfaces/macs/mac/security-groups. + type: str + sample: "securitygroup1,securitygroup2" + ansible_ec2_services_domain: + description: The domain for AWS resources for the region; for example, amazonaws.com for us-east-1. + type: str + sample: "amazonaws.com" + ansible_ec2_services_partition: + description: + - The partition that the resource is in. For standard AWS regions, the partition is aws. + - If you have resources in other partitions, the partition is aws-partitionname. 
+ - For example, the partition for resources in the China (Beijing) region is aws-cn. + type: str + sample: "aws" + ansible_ec2_spot_termination_time: + description: + - The approximate time, in UTC, that the operating system for your Spot instance will receive the shutdown signal. + - This item is present and contains a time value only if the Spot instance has been marked for termination by Amazon EC2. + - The termination-time item is not set to a time if you terminated the Spot instance yourself. + type: str + sample: "2015-01-05T18:02:00Z" + ansible_ec2_user_data: + description: The instance user data. + type: str + sample: "#!/bin/bash" +''' + +import json +import re +import socket +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_text +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six.moves.urllib.parse import quote + +socket.setdefaulttimeout(5) + + +class Ec2Metadata(object): + ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/' + ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key' + ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/' + ec2_dynamicdata_uri = 'http://169.254.169.254/latest/dynamic/' + + def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None, ec2_dynamicdata_uri=None): + self.module = module + self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri + self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri + self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri + self.uri_dynamic = ec2_dynamicdata_uri or self.ec2_dynamicdata_uri + self._data = {} + self._prefix = 'ansible_ec2_%s' + + def _fetch(self, url): + encoded_url = quote(url, safe='%/:=&?~#+!$,;\'@()*[]') + response, info = fetch_url(self.module, encoded_url, force=True) + + if info.get('status') not in (200, 404): + time.sleep(3) + # request went bad, retry once then raise + self.module.warn('Retrying query to metadata service. 
First attempt failed: {0}'.format(info['msg'])) + response, info = fetch_url(self.module, encoded_url, force=True) + if info.get('status') not in (200, 404): + # fail out now + self.module.fail_json(msg='Failed to retrieve metadata from AWS: {0}'.format(info['msg']), response=info) + if response: + data = response.read() + else: + data = None + return to_text(data) + + def _mangle_fields(self, fields, uri, filter_patterns=None): + filter_patterns = ['public-keys-0'] if filter_patterns is None else filter_patterns + + new_fields = {} + for key, value in fields.items(): + split_fields = key[len(uri):].split('/') + # Parse out the IAM role name (which is _not_ the same as the instance profile name) + if len(split_fields) == 3 and split_fields[0:2] == ['iam', 'security-credentials'] and ':' not in split_fields[2]: + new_fields[self._prefix % "iam-instance-profile-role"] = split_fields[2] + if len(split_fields) > 1 and split_fields[1]: + new_key = "-".join(split_fields) + new_fields[self._prefix % new_key] = value + else: + new_key = "".join(split_fields) + new_fields[self._prefix % new_key] = value + for pattern in filter_patterns: + for key in dict(new_fields): + match = re.search(pattern, key) + if match: + new_fields.pop(key) + return new_fields + + def fetch(self, uri, recurse=True): + raw_subfields = self._fetch(uri) + if not raw_subfields: + return + subfields = raw_subfields.split('\n') + for field in subfields: + if field.endswith('/') and recurse: + self.fetch(uri + field) + if uri.endswith('/'): + new_uri = uri + field + else: + new_uri = uri + '/' + field + if new_uri not in self._data and not new_uri.endswith('/'): + content = self._fetch(new_uri) + if field == 'security-groups' or field == 'security-group-ids': + sg_fields = ",".join(content.split('\n')) + self._data['%s' % (new_uri)] = sg_fields + else: + try: + dict = json.loads(content) + self._data['%s' % (new_uri)] = content + for (key, value) in dict.items(): + self._data['%s:%s' % (new_uri, key.lower())] = value + except Exception: + self._data['%s' % (new_uri)] = content # not a stringified JSON string + + def fix_invalid_varnames(self, data): + """Change ':'' and '-' to '_' to ensure valid template variable names""" + new_data = data.copy() + for key, value in data.items(): + if ':' in key or '-' in key: + newkey = re.sub(':|-', '_', key) + new_data[newkey] = value + del new_data[key] + + return new_data + + def run(self): + self.fetch(self.uri_meta) # populate _data with metadata + data = self._mangle_fields(self._data, self.uri_meta) + data[self._prefix % 'user-data'] = self._fetch(self.uri_user) + data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh) + + self._data = {} # clear out metadata in _data + self.fetch(self.uri_dynamic) # populate _data with dynamic data + dyndata = self._mangle_fields(self._data, self.uri_dynamic) + data.update(dyndata) + data = self.fix_invalid_varnames(data) + + # Maintain old key for backwards compatibility + if 'ansible_ec2_instance_identity_document_region' in data: + data['ansible_ec2_placement_region'] = data['ansible_ec2_instance_identity_document_region'] + return data + + +def main(): + module = AnsibleModule( + argument_spec={}, + supports_check_mode=True, + ) + + ec2_metadata_facts = Ec2Metadata(module).run() + ec2_metadata_facts_result = dict(changed=False, ansible_facts=ec2_metadata_facts) + + module.exit_json(**ec2_metadata_facts_result) + + +if __name__ == '__main__': + main() diff --git 
a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py new file mode 100644 index 00000000..cf4762dd --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_snapshot +version_added: 1.0.0 +short_description: Creates a snapshot from an existing volume +description: + - Creates an EC2 snapshot from an existing EBS volume. +options: + volume_id: + description: + - Volume from which to take the snapshot. + required: false + type: str + description: + description: + - Description to be applied to the snapshot. + required: false + type: str + instance_id: + description: + - Instance that has the required volume to snapshot mounted. + required: false + type: str + device_name: + description: + - Device name of a mounted volume to be snapshotted. + required: false + type: str + snapshot_tags: + description: + - A dictionary of tags to add to the snapshot. + type: dict + required: false + wait: + description: + - Wait for the snapshot to be ready. + type: bool + required: false + default: yes + wait_timeout: + description: + - How long before wait gives up, in seconds. + - Specify 0 to wait forever. + required: false + default: 0 + type: int + state: + description: + - Whether to add or create a snapshot. + required: false + default: present + choices: ['absent', 'present'] + type: str + snapshot_id: + description: + - Snapshot id to remove. + required: false + type: str + last_snapshot_min_age: + description: + - If the volume's most recent snapshot has started less than `last_snapshot_min_age' minutes ago, a new snapshot will not be created. + required: false + default: 0 + type: int + +author: "Will Thames (@willthames)" +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Simple snapshot of volume using volume_id +- amazon.aws.ec2_snapshot: + volume_id: vol-abcdef12 + description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 + +# Snapshot of volume mounted on device_name attached to instance_id +- amazon.aws.ec2_snapshot: + instance_id: i-12345678 + device_name: /dev/sdb1 + description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 + +# Snapshot of volume with tagging +- amazon.aws.ec2_snapshot: + instance_id: i-12345678 + device_name: /dev/sdb1 + snapshot_tags: + frequency: hourly + source: /data + +# Remove a snapshot +- amazon.aws.ec2_snapshot: + snapshot_id: snap-abcd1234 + state: absent + +# Create a snapshot only if the most recent one is older than 1 hour +- amazon.aws.ec2_snapshot: + volume_id: vol-abcdef12 + last_snapshot_min_age: 60 +''' + +RETURN = ''' +snapshot_id: + description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created. + type: str + returned: always + sample: snap-01234567 +tags: + description: Any tags assigned to the snapshot. + type: dict + returned: always + sample: "{ 'Name': 'instance-name' }" +volume_id: + description: The ID of the volume that was used to create the snapshot. + type: str + returned: always + sample: vol-01234567 +volume_size: + description: The size of the volume, in GiB. 
+    type: int
+    returned: always
+    sample: 8
+'''
+
+import time
+import datetime
+
+try:
+    import boto.exception
+except ImportError:
+    pass  # Taken care of by ec2.HAS_BOTO
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import HAS_BOTO
+from ..module_utils.ec2 import ec2_connect
+
+
+# Find the most recent snapshot
+def _get_snapshot_starttime(snap):
+    return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.%fZ')
+
+
+def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
+    """
+    Gets the most recently created snapshot and optionally filters the result
+    if the snapshot is too old
+    :param snapshots: list of snapshots to search
+    :param max_snapshot_age_secs: filter the result if it is older than this
+    :param now: simulate time -- used for unit testing
+    :return:
+    """
+    if len(snapshots) == 0:
+        return None
+
+    if not now:
+        now = datetime.datetime.utcnow()
+
+    youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)
+
+    # See if the snapshot is younger than the given max age
+    snapshot_start = datetime.datetime.strptime(youngest_snapshot.start_time, '%Y-%m-%dT%H:%M:%S.%fZ')
+    snapshot_age = now - snapshot_start
+
+    if max_snapshot_age_secs is not None:
+        if snapshot_age.total_seconds() > max_snapshot_age_secs:
+            return None
+
+    return youngest_snapshot
+
+
+def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
+    """
+    Wait for the snapshot to be created
+    :param snapshot:
+    :param wait_timeout_secs: fail this step after this many seconds
+    :param sleep_func:
+    :return:
+    """
+    time_waited = 0
+    snapshot.update()
+    while snapshot.status != 'completed':
+        sleep_func(3)
+        snapshot.update()
+        time_waited += 3
+        if wait_timeout_secs and time_waited > wait_timeout_secs:
+            return False
+    return True
+
+
+def create_snapshot(module, ec2, state=None, description=None, wait=None,
+                    wait_timeout=None, volume_id=None, instance_id=None,
+                    snapshot_id=None, device_name=None, snapshot_tags=None,
+                    last_snapshot_min_age=None):
+    snapshot = None
+    changed = False
+
+    required = [volume_id, snapshot_id, instance_id]
+    if required.count(None) != len(required) - 1:  # only 1 must be set
+        module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
+    if instance_id and not device_name or device_name and not instance_id:
+        module.fail_json(msg='Instance ID and device name must both be specified')
+
+    if instance_id:
+        try:
+            volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
+        except boto.exception.BotoServerError as e:
+            module.fail_json_aws(e)
+
+        if not volumes:
+            module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
+
+        volume_id = volumes[0].id
+
+    if state == 'absent':
+        if not snapshot_id:
+            module.fail_json(msg='snapshot_id must be set when state is absent')
+        try:
+            ec2.delete_snapshot(snapshot_id)
+        except boto.exception.BotoServerError as e:
+            # exception is raised if snapshot does not exist
+            if e.error_code == 'InvalidSnapshot.NotFound':
+                module.exit_json(changed=False)
+            else:
+                module.fail_json_aws(e)
+
+        # successful delete
+        module.exit_json(changed=True)
+
+    if last_snapshot_min_age > 0:
+        try:
+            current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
+        except boto.exception.BotoServerError as e:
+            module.fail_json_aws(e)
+
+        last_snapshot_min_age = last_snapshot_min_age * 60  # Convert to seconds
+        snapshot = 
_get_most_recent_snapshot(current_snapshots, + max_snapshot_age_secs=last_snapshot_min_age) + try: + # Create a new snapshot if we didn't find an existing one to use + if snapshot is None: + snapshot = ec2.create_snapshot(volume_id, description=description) + changed = True + if wait: + if not _create_with_wait(snapshot, wait_timeout): + module.fail_json(msg='Timed out while creating snapshot.') + if snapshot_tags: + for k, v in snapshot_tags.items(): + snapshot.add_tag(k, v) + except boto.exception.BotoServerError as e: + module.fail_json_aws(e) + + module.exit_json(changed=changed, + snapshot_id=snapshot.id, + volume_id=snapshot.volume_id, + volume_size=snapshot.volume_size, + tags=snapshot.tags.copy()) + + +def create_snapshot_ansible_module(): + argument_spec = dict( + volume_id=dict(), + description=dict(), + instance_id=dict(), + snapshot_id=dict(), + device_name=dict(), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=0), + last_snapshot_min_age=dict(type='int', default=0), + snapshot_tags=dict(type='dict', default=dict()), + state=dict(choices=['absent', 'present'], default='present'), + ) + module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False) + return module + + +def main(): + module = create_snapshot_ansible_module() + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + volume_id = module.params.get('volume_id') + snapshot_id = module.params.get('snapshot_id') + description = module.params.get('description') + instance_id = module.params.get('instance_id') + device_name = module.params.get('device_name') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + last_snapshot_min_age = module.params.get('last_snapshot_min_age') + snapshot_tags = module.params.get('snapshot_tags') + state = module.params.get('state') + + ec2 = ec2_connect(module) + + create_snapshot( + module=module, + state=state, + description=description, + wait=wait, + wait_timeout=wait_timeout, + ec2=ec2, + volume_id=volume_id, + instance_id=instance_id, + snapshot_id=snapshot_id, + device_name=device_name, + snapshot_tags=snapshot_tags, + last_snapshot_min_age=last_snapshot_min_age + ) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_facts.py new file mode 100644 index 00000000..d2b29f04 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_facts.py @@ -0,0 +1,248 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_snapshot_info +version_added: 1.0.0 +short_description: Gather information about ec2 volume snapshots in AWS +description: + - Gather information about ec2 volume snapshots in AWS. + - This module was called C(ec2_snapshot_facts) before Ansible 2.9. The usage did not change. +requirements: [ boto3 ] +author: "Rob White (@wimnat)" +options: + snapshot_ids: + description: + - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. 
+ required: false + default: [] + type: list + elements: str + owner_ids: + description: + - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have + access are returned. + required: false + default: [] + type: list + elements: str + restorable_by_user_ids: + description: + - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are + returned. + required: false + default: [] + type: list + elements: str + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter + names and values are case sensitive. + required: false + type: dict + default: {} +notes: + - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by + the account use the filter 'owner-id'. + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all snapshots, including public ones +- amazon.aws.ec2_snapshot_info: + +# Gather information about all snapshots owned by the account 0123456789 +- amazon.aws.ec2_snapshot_info: + filters: + owner-id: 0123456789 + +# Or alternatively... +- amazon.aws.ec2_snapshot_info: + owner_ids: + - 0123456789 + +# Gather information about a particular snapshot using ID +- amazon.aws.ec2_snapshot_info: + filters: + snapshot-id: snap-00112233 + +# Or alternatively... +- amazon.aws.ec2_snapshot_info: + snapshot_ids: + - snap-00112233 + +# Gather information about any snapshot with a tag key Name and value Example +- amazon.aws.ec2_snapshot_info: + filters: + "tag:Name": Example + +# Gather information about any snapshot with an error status +- amazon.aws.ec2_snapshot_info: + filters: + status: error + +''' + +RETURN = ''' +snapshot_id: + description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created. + type: str + returned: always + sample: snap-01234567 +volume_id: + description: The ID of the volume that was used to create the snapshot. + type: str + returned: always + sample: vol-01234567 +state: + description: The snapshot state (completed, pending or error). + type: str + returned: always + sample: completed +state_message: + description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper + AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the + error occurred. + type: str + returned: always + sample: +start_time: + description: The time stamp when the snapshot was initiated. + type: str + returned: always + sample: "2015-02-12T02:14:02+00:00" +progress: + description: The progress of the snapshot, as a percentage. + type: str + returned: always + sample: "100%" +owner_id: + description: The AWS account ID of the EBS snapshot owner. + type: str + returned: always + sample: "099720109477" +description: + description: The description for the snapshot. + type: str + returned: always + sample: "My important backup" +volume_size: + description: The size of the volume, in GiB. 
+ type: int + returned: always + sample: 8 +owner_alias: + description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot. + type: str + returned: always + sample: "033440102211" +tags: + description: Any tags assigned to the snapshot. + type: dict + returned: always + sample: "{ 'my_tag_key': 'my_tag_value' }" +encrypted: + description: Indicates whether the snapshot is encrypted. + type: bool + returned: always + sample: "True" +kms_key_id: + description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \ + protect the volume encryption key for the parent volume. + type: str + returned: always + sample: "74c9742a-a1b2-45cb-b3fe-abcdef123456" +data_encryption_key_id: + description: The data encryption key identifier for the snapshot. This value is a unique identifier that \ + corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy. + type: str + returned: always + sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456" + +''' + +try: + from botocore.exceptions import ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.core import is_boto3_error_code +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def list_ec2_snapshots(connection, module): + + snapshot_ids = module.params.get("snapshot_ids") + owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")] + restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")] + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + + try: + snapshots = connection.describe_snapshots( + aws_retry=True, + SnapshotIds=snapshot_ids, OwnerIds=owner_ids, + RestorableByUserIds=restorable_by_user_ids, Filters=filters) + except is_boto3_error_code('InvalidSnapshot.NotFound') as e: + if len(snapshot_ids) > 1: + module.warn("Some of your snapshots may exist, but %s" % str(e)) + snapshots = {'Snapshots': []} + except ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to describe snapshots') + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_snapshots = [] + for snapshot in snapshots['Snapshots']: + snaked_snapshots.append(camel_dict_to_snake_dict(snapshot)) + + # Turn the boto3 result in to ansible friendly tag dictionary + for snapshot in snaked_snapshots: + if 'tags' in snapshot: + snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'], 'key', 'value') + + module.exit_json(snapshots=snaked_snapshots) + + +def main(): + + argument_spec = dict( + snapshot_ids=dict(default=[], type='list', elements='str'), + owner_ids=dict(default=[], type='list', elements='str'), + restorable_by_user_ids=dict(default=[], type='list', elements='str'), + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters'] + ], + supports_check_mode=True + ) + if module._name == 'ec2_snapshot_facts': + module.deprecate("The 'ec2_snapshot_facts' module has been renamed to 'ec2_snapshot_info'", date='2021-12-01', collection_name='amazon.aws') + 
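+    # module.client() attaches the retry decorator to the boto3 client, so any
+    # call made with aws_retry=True (such as describe_snapshots() in
+    # list_ec2_snapshots() above) is retried with exponential backoff and
+    # jitter if AWS throttles the request.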
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + list_ec2_snapshots(connection, module) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py new file mode 100644 index 00000000..d2b29f04 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py @@ -0,0 +1,248 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_snapshot_info +version_added: 1.0.0 +short_description: Gather information about ec2 volume snapshots in AWS +description: + - Gather information about ec2 volume snapshots in AWS. + - This module was called C(ec2_snapshot_facts) before Ansible 2.9. The usage did not change. +requirements: [ boto3 ] +author: "Rob White (@wimnat)" +options: + snapshot_ids: + description: + - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. + required: false + default: [] + type: list + elements: str + owner_ids: + description: + - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have + access are returned. + required: false + default: [] + type: list + elements: str + restorable_by_user_ids: + description: + - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are + returned. + required: false + default: [] + type: list + elements: str + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter + names and values are case sensitive. + required: false + type: dict + default: {} +notes: + - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by + the account use the filter 'owner-id'. + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all snapshots, including public ones +- amazon.aws.ec2_snapshot_info: + +# Gather information about all snapshots owned by the account 0123456789 +- amazon.aws.ec2_snapshot_info: + filters: + owner-id: 0123456789 + +# Or alternatively... +- amazon.aws.ec2_snapshot_info: + owner_ids: + - 0123456789 + +# Gather information about a particular snapshot using ID +- amazon.aws.ec2_snapshot_info: + filters: + snapshot-id: snap-00112233 + +# Or alternatively... +- amazon.aws.ec2_snapshot_info: + snapshot_ids: + - snap-00112233 + +# Gather information about any snapshot with a tag key Name and value Example +- amazon.aws.ec2_snapshot_info: + filters: + "tag:Name": Example + +# Gather information about any snapshot with an error status +- amazon.aws.ec2_snapshot_info: + filters: + status: error + +''' + +RETURN = ''' +snapshot_id: + description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created. 
+ type: str + returned: always + sample: snap-01234567 +volume_id: + description: The ID of the volume that was used to create the snapshot. + type: str + returned: always + sample: vol-01234567 +state: + description: The snapshot state (completed, pending or error). + type: str + returned: always + sample: completed +state_message: + description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper + AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the + error occurred. + type: str + returned: always + sample: +start_time: + description: The time stamp when the snapshot was initiated. + type: str + returned: always + sample: "2015-02-12T02:14:02+00:00" +progress: + description: The progress of the snapshot, as a percentage. + type: str + returned: always + sample: "100%" +owner_id: + description: The AWS account ID of the EBS snapshot owner. + type: str + returned: always + sample: "099720109477" +description: + description: The description for the snapshot. + type: str + returned: always + sample: "My important backup" +volume_size: + description: The size of the volume, in GiB. + type: int + returned: always + sample: 8 +owner_alias: + description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot. + type: str + returned: always + sample: "033440102211" +tags: + description: Any tags assigned to the snapshot. + type: dict + returned: always + sample: "{ 'my_tag_key': 'my_tag_value' }" +encrypted: + description: Indicates whether the snapshot is encrypted. + type: bool + returned: always + sample: "True" +kms_key_id: + description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \ + protect the volume encryption key for the parent volume. + type: str + returned: always + sample: "74c9742a-a1b2-45cb-b3fe-abcdef123456" +data_encryption_key_id: + description: The data encryption key identifier for the snapshot. This value is a unique identifier that \ + corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy. 
+ type: str + returned: always + sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456" + +''' + +try: + from botocore.exceptions import ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.core import is_boto3_error_code +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def list_ec2_snapshots(connection, module): + + snapshot_ids = module.params.get("snapshot_ids") + owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")] + restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")] + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + + try: + snapshots = connection.describe_snapshots( + aws_retry=True, + SnapshotIds=snapshot_ids, OwnerIds=owner_ids, + RestorableByUserIds=restorable_by_user_ids, Filters=filters) + except is_boto3_error_code('InvalidSnapshot.NotFound') as e: + if len(snapshot_ids) > 1: + module.warn("Some of your snapshots may exist, but %s" % str(e)) + snapshots = {'Snapshots': []} + except ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to describe snapshots') + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_snapshots = [] + for snapshot in snapshots['Snapshots']: + snaked_snapshots.append(camel_dict_to_snake_dict(snapshot)) + + # Turn the boto3 result in to ansible friendly tag dictionary + for snapshot in snaked_snapshots: + if 'tags' in snapshot: + snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'], 'key', 'value') + + module.exit_json(snapshots=snaked_snapshots) + + +def main(): + + argument_spec = dict( + snapshot_ids=dict(default=[], type='list', elements='str'), + owner_ids=dict(default=[], type='list', elements='str'), + restorable_by_user_ids=dict(default=[], type='list', elements='str'), + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters'] + ], + supports_check_mode=True + ) + if module._name == 'ec2_snapshot_facts': + module.deprecate("The 'ec2_snapshot_facts' module has been renamed to 'ec2_snapshot_info'", date='2021-12-01', collection_name='amazon.aws') + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + list_ec2_snapshots(connection, module) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py new file mode 100644 index 00000000..1d8a1e6f --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py @@ -0,0 +1,200 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_tag +version_added: 1.0.0 +short_description: create and remove tags on ec2 resources +description: + - Creates, modifies and removes tags for any EC2 resource. 
+ - Resources are referenced by their resource id (for example, an instance being i-XXXXXXX, a VPC being vpc-XXXXXXX). + - This module is designed to be used with complex args (tags), see the examples. +requirements: [ "boto3", "botocore" ] +options: + resource: + description: + - The EC2 resource id. + required: true + type: str + state: + description: + - Whether the tags should be present or absent on the resource. + - The use of I(state=list) to interrogate the tags of an instance has been + deprecated and will be removed after 2022-06-01. The 'list' + functionality has been moved to a dedicated module M(amazon.aws.ec2_tag_info). + default: present + choices: ['present', 'absent', 'list'] + type: str + tags: + description: + - A dictionary of tags to add or remove from the resource. + - If the value provided for a key is not set and I(state=absent), the tag will be removed regardless of its current value. + - Required when I(state=present) or I(state=absent). + type: dict + purge_tags: + description: + - Whether unspecified tags should be removed from the resource. + - Note that when combined with I(state=absent), specified tags with non-matching values are not purged. + type: bool + default: false + +author: + - Lester Wade (@lwade) + - Paul Arthur (@flowerysong) +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +- name: Ensure tags are present on a resource + amazon.aws.ec2_tag: + region: eu-west-1 + resource: vol-XXXXXX + state: present + tags: + Name: ubervol + env: prod + +- name: Ensure all volumes are tagged + amazon.aws.ec2_tag: + region: eu-west-1 + resource: '{{ item.id }}' + state: present + tags: + Name: dbserver + Env: production + loop: '{{ ec2_vol.volumes }}' + +- name: Remove the Env tag + amazon.aws.ec2_tag: + region: eu-west-1 + resource: i-xxxxxxxxxxxxxxxxx + tags: + Env: + state: absent + +- name: Remove the Env tag if it's currently 'development' + amazon.aws.ec2_tag: + region: eu-west-1 + resource: i-xxxxxxxxxxxxxxxxx + tags: + Env: development + state: absent + +- name: Remove all tags except for Name from an instance + amazon.aws.ec2_tag: + region: eu-west-1 + resource: i-xxxxxxxxxxxxxxxxx + tags: + Name: '' + state: absent + purge_tags: true +''' + +RETURN = ''' +tags: + description: A dict containing the tags on the resource + returned: always + type: dict +added_tags: + description: A dict of tags that were added to the resource + returned: If tags were added + type: dict +removed_tags: + description: A dict of tags that were removed from the resource + returned: If tags were removed + type: dict +''' + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ..module_utils.ec2 import compare_aws_tags + + +def get_tags(ec2, module, resource): + filters = [{'Name': 'resource-id', 'Values': [resource]}] + try: + result = AWSRetry.jittered_backoff()(ec2.describe_tags)(Filters=filters) + return boto3_tag_list_to_ansible_dict(result['Tags']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource)) + + +def main(): + argument_spec = dict( + resource=dict(required=True), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=False), + 
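+        # 'tags' is not flagged required=True here because it is only needed
+        # for state=present and state=absent; the required_if rule below
+        # enforces that, while the deprecated state=list needs no tags at all.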
state=dict(default='present', choices=['present', 'absent', 'list']), + ) + required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])] + + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) + + resource = module.params['resource'] + tags = module.params['tags'] + state = module.params['state'] + purge_tags = module.params['purge_tags'] + + result = {'changed': False} + + ec2 = module.client('ec2') + + current_tags = get_tags(ec2, module, resource) + + if state == 'list': + module.deprecate( + 'Using the "list" state has been deprecated. Please use the ec2_tag_info module instead', date='2022-06-01', collection_name='amazon.aws') + module.exit_json(changed=False, tags=current_tags) + + add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) + + remove_tags = {} + if state == 'absent': + for key in tags: + if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): + remove_tags[key] = current_tags[key] + + for key in remove: + remove_tags[key] = current_tags[key] + + if remove_tags: + result['changed'] = True + result['removed_tags'] = remove_tags + if not module.check_mode: + try: + AWSRetry.jittered_backoff()(ec2.delete_tags)(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(remove_tags)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource)) + + if state == 'present' and add_tags: + result['changed'] = True + result['added_tags'] = add_tags + current_tags.update(add_tags) + if not module.check_mode: + try: + AWSRetry.jittered_backoff()(ec2.create_tags)(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(add_tags)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource)) + + result['tags'] = get_tags(ec2, module, resource) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py new file mode 100644 index 00000000..947ce363 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py @@ -0,0 +1,88 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_tag_info +version_added: 1.0.0 +short_description: list tags on ec2 resources +description: + - Lists tags for any EC2 resource. + - Resources are referenced by their resource id (e.g. an instance being i-XXXXXXX, a vpc being vpc-XXXXXX). + - Resource tags can be managed using the M(amazon.aws.ec2_tag) module. +requirements: [ "boto3", "botocore" ] +options: + resource: + description: + - The EC2 resource id (for example i-XXXXXX or vpc-XXXXXX). 
+ required: true + type: str + +author: + - Mark Chappell (@tremble) +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +- name: Retrieve all tags on an instance + amazon.aws.ec2_tag_info: + region: eu-west-1 + resource: i-xxxxxxxxxxxxxxxxx + register: instance_tags + +- name: Retrieve all tags on a VPC + amazon.aws.ec2_tag_info: + region: eu-west-1 + resource: vpc-xxxxxxxxxxxxxxxxx + register: vpc_tags +''' + +RETURN = ''' +tags: + description: A dict containing the tags on the resource + returned: always + type: dict +''' + +try: + from botocore.exceptions import BotoCoreError, ClientError +except Exception: + pass # Handled by AnsibleAWSModule + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict, AWSRetry + + +@AWSRetry.jittered_backoff() +def get_tags(ec2, module, resource): + filters = [{'Name': 'resource-id', 'Values': [resource]}] + return boto3_tag_list_to_ansible_dict(ec2.describe_tags(Filters=filters)['Tags']) + + +def main(): + argument_spec = dict( + resource=dict(required=True), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + resource = module.params['resource'] + ec2 = module.client('ec2') + + try: + current_tags = get_tags(ec2, module, resource) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource)) + + module.exit_json(changed=False, tags=current_tags) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py new file mode 100644 index 00000000..fb85a85d --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py @@ -0,0 +1,809 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vol +version_added: 1.0.0 +short_description: Create and attach a volume, return volume id and device map +description: + - Creates an EBS volume and optionally attaches it to an instance. + - If both I(instance) and I(name) are given and the instance has a device at the device name, then no volume is created and no attachment is made. + - This module has a dependency on python-boto. +options: + instance: + description: + - Instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach. + type: str + name: + description: + - Volume Name tag if you wish to attach an existing volume (requires instance) + type: str + id: + description: + - Volume id if you wish to attach an existing volume (requires instance) or remove an existing volume + type: str + volume_size: + description: + - Size of volume (in GiB) to create. + type: int + volume_type: + description: + - Type of EBS volume; standard (magnetic), gp2 (SSD), gp3 (SSD), io1 (Provisioned IOPS), io2 (Provisioned IOPS), + st1 (Throughput Optimized HDD), sc1 (Cold HDD). + "Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility. + default: standard + choices: ['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2'] + type: str + iops: + description: + - The provisioned IOPs you want to associate with this volume (integer). 
+      - By default AWS will set this to 100.
+    type: int
+  encrypted:
+    description:
+      - Enable encryption at rest for this volume.
+    default: false
+    type: bool
+  kms_key_id:
+    description:
+      - Specify the id of the KMS key to use.
+    type: str
+  device_name:
+    description:
+      - Device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
+    type: str
+  delete_on_termination:
+    description:
+      - When set to C(true), the volume will be deleted upon instance termination.
+    type: bool
+    default: false
+  zone:
+    description:
+      - Zone in which to create the volume; if unset, uses the zone the instance is in (if set).
+    aliases: ['availability_zone', 'aws_zone', 'ec2_zone']
+    type: str
+  snapshot:
+    description:
+      - Snapshot ID on which to base the volume.
+    type: str
+  validate_certs:
+    description:
+      - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
+    type: bool
+    default: true
+  state:
+    description:
+      - Whether to ensure the volume is present or absent.
+      - The use of I(state=list) to interrogate the volume has been deprecated
+        and will be removed after 2022-06-01. The 'list' functionality
+        has been moved to a dedicated module M(amazon.aws.ec2_vol_info).
+    default: present
+    choices: ['absent', 'present', 'list']
+    type: str
+  tags:
+    description:
+      - tag:value pairs to add to the volume after creation.
+    default: {}
+    type: dict
+  modify_volume:
+    description:
+      - The volume will not be modified unless this key is C(true).
+    type: bool
+    default: false
+    version_added: 1.4.0
+  throughput:
+    description:
+      - Volume throughput in MB/s.
+      - This parameter is only valid for gp3 volumes.
+      - Valid range is from 125 to 1000.
+    type: int
+    version_added: 1.4.0
+author: "Lester Wade (@lwade)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ boto3>=1.16.33 ]
+'''
+
+EXAMPLES = '''
+# Simple attachment action
+- amazon.aws.ec2_vol:
+    instance: XXXXXX
+    volume_size: 5
+    device_name: sdd
+    region: us-west-2
+
+# Example using custom iops params
+- amazon.aws.ec2_vol:
+    instance: XXXXXX
+    volume_size: 5
+    iops: 100
+    device_name: sdd
+    region: us-west-2
+
+# Example using snapshot id
+- amazon.aws.ec2_vol:
+    instance: XXXXXX
+    snapshot: "{{ snapshot }}"
+
+# Playbook example combined with instance launch
+- amazon.aws.ec2:
+    keypair: "{{ keypair }}"
+    image: "{{ image }}"
+    wait: yes
+    count: 3
+  register: ec2
+- amazon.aws.ec2_vol:
+    instance: "{{ item.id }}"
+    volume_size: 5
+  loop: "{{ ec2.instances }}"
+  register: ec2_vol
+
+# Example: Launch an instance and then add a volume if not already attached
+# * Volume will be created with the given name if not already created.
+# * Nothing will happen if the volume is already attached.
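+# * If device_name is not given, /dev/sdf is assumed on Linux/UNIX and /dev/xvdf on Windows (see device_name above).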
+# * Requires Ansible 2.0 + +- amazon.aws.ec2: + keypair: "{{ keypair }}" + image: "{{ image }}" + zone: YYYYYY + id: my_instance + wait: yes + count: 1 + register: ec2 + +- amazon.aws.ec2_vol: + instance: "{{ item.id }}" + name: my_existing_volume_Name_tag + device_name: /dev/xvdf + loop: "{{ ec2.instances }}" + register: ec2_vol + +# Remove a volume +- amazon.aws.ec2_vol: + id: vol-XXXXXXXX + state: absent + +# Detach a volume (since 1.9) +- amazon.aws.ec2_vol: + id: vol-XXXXXXXX + instance: None + region: us-west-2 + +# List volumes for an instance +- amazon.aws.ec2_vol: + instance: i-XXXXXX + state: list + region: us-west-2 + +# Create new volume using SSD storage +- amazon.aws.ec2_vol: + instance: XXXXXX + volume_size: 50 + volume_type: gp2 + device_name: /dev/xvdf + +# Attach an existing volume to instance. The volume will be deleted upon instance termination. +- amazon.aws.ec2_vol: + instance: XXXXXX + id: XXXXXX + device_name: /dev/sdf + delete_on_termination: yes +''' + +RETURN = ''' +device: + description: device name of attached volume + returned: when success + type: str + sample: "/def/sdf" +volume_id: + description: the id of volume + returned: when success + type: str + sample: "vol-35b333d9" +volume_type: + description: the volume type + returned: when success + type: str + sample: "standard" +volume: + description: a dictionary containing detailed attributes of the volume + returned: when success + type: str + sample: { + "attachment_set": { + "attach_time": "2015-10-23T00:22:29.000Z", + "deleteOnTermination": "false", + "device": "/dev/sdf", + "instance_id": "i-8356263c", + "status": "attached" + }, + "create_time": "2015-10-21T14:36:08.870Z", + "encrypted": false, + "id": "vol-35b333d9", + "iops": null, + "size": 1, + "snapshot_id": "", + "status": "in-use", + "tags": { + "env": "dev" + }, + "type": "standard", + "zone": "us-east-1b" + } +''' + +import time + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import camel_dict_to_snake_dict +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ..module_utils.ec2 import compare_aws_tags +from ..module_utils.ec2 import AWSRetry +from ..module_utils.core import is_boto3_error_code + +try: + import botocore +except ImportError: + pass # Taken care of by AnsibleAWSModule + + +def get_instance(module, ec2_conn, instance_id=None): + instance = None + if not instance_id: + return instance + + try: + reservation_response = ec2_conn.describe_instances(aws_retry=True, InstanceIds=[instance_id]) + instance = camel_dict_to_snake_dict(reservation_response['Reservations'][0]['Instances'][0]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Error while getting instance_id with id {0}'.format(instance)) + + return instance + + +def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True): + name = module.params.get('name') + param_id = module.params.get('id') + zone = module.params.get('zone') + + if not vol_id: + vol_id = param_id + + # If no name or id supplied, just try volume creation based on module parameters + if vol_id is None and name is None: + return None + + find_params = dict() + vols = [] + + if vol_id: + find_params['VolumeIds'] = [vol_id] + elif name: + find_params['Filters'] = ansible_dict_to_boto3_filter_list({'tag:Name': name}) + elif zone: + find_params['Filters'] = 
ansible_dict_to_boto3_filter_list({'availability-zone': zone}) + + try: + paginator = ec2_conn.get_paginator('describe_volumes') + vols_response = paginator.paginate(**find_params) + vols = list(vols_response)[0].get('Volumes') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if is_boto3_error_code('InvalidVolume.NotFound'): + module.exit_json(msg="Volume {0} does not exist".format(vol_id), changed=False) + module.fail_json_aws(e, msg='Error while getting EBS volumes with the parameters {0}'.format(find_params)) + + if not vols: + if fail_on_not_found and vol_id: + msg = "Could not find volume with id: {0}".format(vol_id) + if name: + msg += (" and name: {0}".format(name)) + module.fail_json(msg=msg) + else: + return None + + if len(vols) > 1: + module.fail_json( + msg="Found more than one volume in zone (if specified) with name: {0}".format(name), + found=[v['VolumeId'] for v in vols] + ) + vol = camel_dict_to_snake_dict(vols[0]) + return vol + + +def get_volumes(module, ec2_conn): + instance = module.params.get('instance') + + find_params = dict() + if instance: + find_params['Filters'] = ansible_dict_to_boto3_filter_list({'attachment.instance-id': instance}) + + vols = [] + try: + vols_response = ec2_conn.describe_volumes(aws_retry=True, **find_params) + vols = [camel_dict_to_snake_dict(vol) for vol in vols_response.get('Volumes', [])] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Error while getting EBS volumes') + return vols + + +def delete_volume(module, ec2_conn, volume_id=None): + changed = False + if volume_id: + try: + ec2_conn.delete_volume(aws_retry=True, VolumeId=volume_id) + changed = True + except is_boto3_error_code('InvalidVolume.NotFound'): + module.exit_json(changed=False) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Error while deleting volume') + return changed + + +def update_volume(module, ec2_conn, volume): + changed = False + req_obj = {'VolumeId': volume['volume_id']} + + if module.params.get('modify_volume'): + iops_changed = False + if volume['volume_type'] != 'standard': + target_iops = module.params.get('iops') + if target_iops: + original_iops = volume['iops'] + if target_iops != original_iops: + iops_changed = True + req_obj['iops'] = target_iops + + target_size = module.params.get('volume_size') + size_changed = False + if target_size: + original_size = volume['size'] + if target_size != original_size: + size_changed = True + req_obj['size'] = target_size + + target_type = module.params.get('volume_type') + original_type = None + type_changed = False + if target_type: + original_type = volume['volume_type'] + if target_type != original_type: + type_changed = True + req_obj['VolumeType'] = target_type + + target_throughput = module.params.get('throughput') + throughput_changed = False + if 'gp3' in [target_type, original_type]: + if target_throughput: + original_throughput = volume.get('throughput') + if target_throughput != original_throughput: + throughput_changed = True + req_obj['Throughput'] = target_throughput + + changed = iops_changed or size_changed or type_changed or throughput_changed + + if changed: + response = ec2_conn.modify_volume(**req_obj) + + volume['size'] = response.get('VolumeModification').get('TargetSize') + volume['volume_type'] = response.get('VolumeModification').get('TargetVolumeType') + volume['iops'] = 
response.get('VolumeModification').get('TargetIops') + volume['throughput'] = response.get('VolumeModification').get('TargetThroughput') + + return volume, changed + + +def create_volume(module, ec2_conn, zone): + changed = False + iops = module.params.get('iops') + encrypted = module.params.get('encrypted') + kms_key_id = module.params.get('kms_key_id') + volume_size = module.params.get('volume_size') + volume_type = module.params.get('volume_type') + snapshot = module.params.get('snapshot') + throughput = module.params.get('throughput') + # If custom iops is defined we use volume_type "io1" rather than the default of "standard" + if iops: + volume_type = 'io1' + + volume = get_volume(module, ec2_conn) + + if volume is None: + + try: + changed = True + additional_params = dict() + + if volume_size: + additional_params['Size'] = int(volume_size) + + if kms_key_id: + additional_params['KmsKeyId'] = kms_key_id + + if snapshot: + additional_params['SnapshotId'] = snapshot + + if iops: + additional_params['Iops'] = int(iops) + + if throughput: + additional_params['Throughput'] = int(throughput) + + create_vol_response = ec2_conn.create_volume( + aws_retry=True, + AvailabilityZone=zone, + Encrypted=encrypted, + VolumeType=volume_type, + **additional_params + ) + + waiter = ec2_conn.get_waiter('volume_available') + waiter.wait( + VolumeIds=[create_vol_response['VolumeId']], + ) + volume = get_volume(module, ec2_conn, vol_id=create_vol_response['VolumeId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Error while creating EBS volume') + + return volume, changed + + +def attach_volume(module, ec2_conn, volume_dict, instance_dict, device_name): + changed = False + + # If device_name isn't set, make a choice based on best practices here: + # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html + + # In future this needs to be more dynamic but combining block device mapping best practices + # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. 
For me ;) + + attachment_data = get_attachment_data(volume_dict, wanted_state='attached') + if attachment_data: + if attachment_data.get('instance_id', None) != instance_dict['instance_id']: + module.fail_json(msg="Volume {0} is already attached to another instance: {1}".format(volume_dict['volume_id'], + attachment_data.get('instance_id', None))) + else: + return volume_dict, changed + + try: + attach_response = ec2_conn.attach_volume(aws_retry=True, Device=device_name, + InstanceId=instance_dict['instance_id'], + VolumeId=volume_dict['volume_id']) + + waiter = ec2_conn.get_waiter('volume_in_use') + waiter.wait(VolumeIds=[attach_response['VolumeId']]) + changed = True + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Error while attaching EBS volume') + + modify_dot_attribute(module, ec2_conn, instance_dict, device_name) + + volume = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id']) + return volume, changed + + +def modify_dot_attribute(module, ec2_conn, instance_dict, device_name): + """ Modify delete_on_termination attribute """ + + delete_on_termination = module.params.get('delete_on_termination') + changed = False + + # volume_in_use can return *shortly* before it appears on the instance + # description + mapped_block_device = None + _attempt = 0 + while mapped_block_device is None: + _attempt += 1 + instance_dict = get_instance(module, ec2_conn=ec2_conn, instance_id=instance_dict['instance_id']) + mapped_block_device = get_mapped_block_device(instance_dict=instance_dict, device_name=device_name) + if mapped_block_device is None: + if _attempt > 2: + module.fail_json(msg='Unable to find device on instance', + device=device_name, instance=instance_dict) + time.sleep(1) + + if delete_on_termination != mapped_block_device['ebs'].get('delete_on_termination'): + try: + ec2_conn.modify_instance_attribute( + aws_retry=True, + InstanceId=instance_dict['instance_id'], + BlockDeviceMappings=[{ + "DeviceName": device_name, + "Ebs": { + "DeleteOnTermination": delete_on_termination + } + }] + ) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, + msg='Error while modifying Block Device Mapping of instance {0}'.format(instance_dict['instance_id'])) + + return changed + + +def get_attachment_data(volume_dict, wanted_state=None): + changed = False + + attachment_data = {} + if not volume_dict: + return attachment_data + for data in volume_dict.get('attachments', []): + if wanted_state and wanted_state == data['state']: + attachment_data = data + break + else: + # No filter, return first + attachment_data = data + break + + return attachment_data + + +def detach_volume(module, ec2_conn, volume_dict): + changed = False + + attachment_data = get_attachment_data(volume_dict, wanted_state='attached') + if attachment_data: + ec2_conn.detach_volume(aws_retry=True, VolumeId=volume_dict['volume_id']) + waiter = ec2_conn.get_waiter('volume_available') + waiter.wait( + VolumeIds=[volume_dict['volume_id']], + ) + changed = True + + volume_dict = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id']) + return volume_dict, changed + + +def get_volume_info(volume, tags=None): + if not tags: + tags = boto3_tag_list_to_ansible_dict(volume.get('tags')) + attachment_data = get_attachment_data(volume) + volume_info = { + 'create_time': volume.get('create_time'), + 'encrypted': volume.get('encrypted'), + 'id': volume.get('volume_id'), + 'iops': 
volume.get('iops'), + 'size': volume.get('size'), + 'snapshot_id': volume.get('snapshot_id'), + 'status': volume.get('state'), + 'type': volume.get('volume_type'), + 'zone': volume.get('availability_zone'), + 'throughput': volume.get('throughput'), + 'attachment_set': { + 'attach_time': attachment_data.get('attach_time', None), + 'device': attachment_data.get('device', None), + 'instance_id': attachment_data.get('instance_id', None), + 'status': attachment_data.get('state', None), + 'deleteOnTermination': attachment_data.get('delete_on_termination', None) + }, + 'tags': tags + } + + return volume_info + + +def get_mapped_block_device(instance_dict=None, device_name=None): + mapped_block_device = None + if not instance_dict: + return mapped_block_device + if not device_name: + return mapped_block_device + + for device in instance_dict.get('block_device_mappings', []): + if device['device_name'] == device_name: + mapped_block_device = device + break + + return mapped_block_device + + +def ensure_tags(module, connection, res_id, res_type, tags, add_only): + changed = False + + filters = ansible_dict_to_boto3_filter_list({'resource-id': res_id, 'resource-type': res_type}) + cur_tags = None + try: + cur_tags = connection.describe_tags(aws_retry=True, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't describe tags") + + purge_tags = bool(not add_only) + to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags) + final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')) + + if to_update: + try: + if module.check_mode: + # update tags + final_tags.update(to_update) + else: + connection.create_tags( + aws_retry=True, + Resources=[res_id], + Tags=ansible_dict_to_boto3_tag_list(to_update) + ) + + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create tags") + + if to_delete: + try: + if module.check_mode: + # update tags + for key in to_delete: + del final_tags[key] + else: + tags_list = [] + for key in to_delete: + tags_list.append({'Key': key}) + + connection.delete_tags(aws_retry=True, Resources=[res_id], Tags=tags_list) + + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete tags") + + if not module.check_mode and (to_update or to_delete): + try: + response = connection.describe_tags(aws_retry=True, Filters=filters) + final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags')) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't describe tags") + + return final_tags, changed + + +def main(): + argument_spec = dict( + instance=dict(), + id=dict(), + name=dict(), + volume_size=dict(type='int'), + volume_type=dict(default='standard', choices=['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2']), + iops=dict(type='int'), + encrypted=dict(default=False, type='bool'), + kms_key_id=dict(), + device_name=dict(), + delete_on_termination=dict(default=False, type='bool'), + zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), + snapshot=dict(), + state=dict(default='present', choices=['absent', 'present', 'list']), + tags=dict(default={}, type='dict'), + modify_volume=dict(default=False, type='bool'), + throughput=dict(type='int') + ) + + module = 
AnsibleAWSModule(argument_spec=argument_spec) + + param_id = module.params.get('id') + name = module.params.get('name') + instance = module.params.get('instance') + volume_size = module.params.get('volume_size') + device_name = module.params.get('device_name') + zone = module.params.get('zone') + snapshot = module.params.get('snapshot') + state = module.params.get('state') + tags = module.params.get('tags') + + if state == 'list': + module.deprecate( + 'Using the "list" state has been deprecated. Please use the ec2_vol_info module instead', date='2022-06-01', collection_name='amazon.aws') + + # Ensure we have the zone or can get the zone + if instance is None and zone is None and state == 'present': + module.fail_json(msg="You must specify either instance or zone") + + # Set volume detach flag + if instance == 'None' or instance == '': + instance = None + detach_vol_flag = True + else: + detach_vol_flag = False + + # Set changed flag + changed = False + + ec2_conn = module.client('ec2', AWSRetry.jittered_backoff()) + + if state == 'list': + returned_volumes = [] + vols = get_volumes(module, ec2_conn) + + for v in vols: + returned_volumes.append(get_volume_info(v)) + + module.exit_json(changed=False, volumes=returned_volumes) + + # Here we need to get the zone info for the instance. This covers situation where + # instance is specified but zone isn't. + # Useful for playbooks chaining instance launch with volume create + attach and where the + # zone doesn't matter to the user. + inst = None + + # Delaying the checks until after the instance check allows us to get volume ids for existing volumes + # without needing to pass an unused volume_size + if not volume_size and not (param_id or name or snapshot): + module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot") + + # Try getting volume + volume = get_volume(module, ec2_conn, fail_on_not_found=False) + if state == 'present': + if instance: + inst = get_instance(module, ec2_conn, instance_id=instance) + zone = inst['placement']['availability_zone'] + + # Use password data attribute to tell whether the instance is Windows or Linux + if device_name is None: + if inst['platform'] == 'Windows': + device_name = '/dev/xvdf' + else: + device_name = '/dev/sdf' + + # Check if there is a volume already mounted there. 
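+            # get_mapped_block_device() walks the instance's block_device_mappings
+            # looking for an entry whose device_name matches; if that device is
+            # already taken by a different volume than the one requested, the
+            # module exits below without changing anything.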
+            mapped_device = get_mapped_block_device(instance_dict=inst, device_name=device_name)
+            if mapped_device:
+                other_volume_mapped = False
+
+                if volume:
+                    if volume['volume_id'] != mapped_device['ebs']['volume_id']:
+                        other_volume_mapped = True
+                else:
+                    # No matching volume was found, so whatever is mapped at the device is some other volume
+                    other_volume_mapped = True
+
+                if other_volume_mapped:
+                    module.exit_json(
+                        msg="Volume mapping for {0} already exists on instance {1}".format(device_name, instance),
+                        volume_id=mapped_device['ebs']['volume_id'],
+                        found_volume=volume,
+                        device=device_name,
+                        changed=False
+                    )
+
+        attach_state_changed = False
+
+        if volume:
+            volume, changed = update_volume(module, ec2_conn, volume)
+        else:
+            volume, changed = create_volume(module, ec2_conn, zone=zone)
+
+        if name:
+            tags['Name'] = name
+        final_tags, tags_changed = ensure_tags(module, ec2_conn, volume['volume_id'], 'volume', tags, False)
+
+        if detach_vol_flag:
+            volume, changed = detach_volume(module, ec2_conn, volume_dict=volume)
+        elif inst is not None:
+            volume, changed = attach_volume(module, ec2_conn, volume_dict=volume, instance_dict=inst, device_name=device_name)
+
+        # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
+        volume_info = get_volume_info(volume, tags=final_tags)
+
+        module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'],
+                         volume_id=volume_info['id'], volume_type=volume_info['type'])
+    elif state == 'absent':
+        if not name and not param_id:
+            module.fail_json(msg='A volume name or id is required for deletion')
+        if volume:
+            detach_volume(module, ec2_conn, volume_dict=volume)
+            changed = delete_volume(module, ec2_conn, volume_id=volume['volume_id'])
+        module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_facts.py
new file mode 100644
index 00000000..fb6a6587
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_facts.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vol_info
+version_added: 1.0.0
+short_description: Gather information about ec2 volumes in AWS
+description:
+  - Gather information about ec2 volumes in AWS.
+  - This module was called C(ec2_vol_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+  filters:
+    type: dict
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+      - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
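+
+# Gather information about volumes attached to a particular instance
+# (the instance ID below is a placeholder)
+- amazon.aws.ec2_vol_info:
+    filters:
+      attachment.instance-id: i-0123456789abcdef0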
+ +# Gather information about all volumes +- amazon.aws.ec2_vol_info: + +# Gather information about a particular volume using volume ID +- amazon.aws.ec2_vol_info: + filters: + volume-id: vol-00112233 + +# Gather information about any volume with a tag key Name and value Example +- amazon.aws.ec2_vol_info: + filters: + "tag:Name": Example + +# Gather information about any volume that is attached +- amazon.aws.ec2_vol_info: + filters: + attachment.status: attached + +''' + +RETURN = ''' +volumes: + description: Volumes that match the provided filters. Each element consists of a dict with all the information related to that volume. + type: list + elements: dict + returned: always + contains: + attachment_set: + description: Information about the volume attachments. + type: dict + sample: { + "attach_time": "2015-10-23T00:22:29.000Z", + "deleteOnTermination": "false", + "device": "/dev/sdf", + "instance_id": "i-8356263c", + "status": "attached" + } + create_time: + description: The time stamp when volume creation was initiated. + type: str + sample: "2015-10-21T14:36:08.870Z" + encrypted: + description: Indicates whether the volume is encrypted. + type: bool + sample: False + id: + description: The ID of the volume. + type: str + sample: "vol-35b333d9" + iops: + description: The number of I/O operations per second (IOPS) that the volume supports. + type: int + sample: null + size: + description: The size of the volume, in GiBs. + type: int + sample: 1 + snapshot_id: + description: The snapshot from which the volume was created, if applicable. + type: str + sample: "" + status: + description: The volume state. + type: str + sample: "in-use" + tags: + description: Any tags assigned to the volume. + type: dict + sample: { + env: "dev" + } + type: + description: The volume type. This can be gp2, io1, st1, sc1, or standard. + type: str + sample: "standard" + zone: + description: The Availability Zone of the volume. 
+ type: str + sample: "us-east-1b" +''' + +try: + from botocore.exceptions import ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def get_volume_info(volume, region): + + attachment = volume["attachments"] + + volume_info = { + 'create_time': volume["create_time"], + 'id': volume["volume_id"], + 'encrypted': volume["encrypted"], + 'iops': volume["iops"] if "iops" in volume else None, + 'size': volume["size"], + 'snapshot_id': volume["snapshot_id"], + 'status': volume["state"], + 'type': volume["volume_type"], + 'zone': volume["availability_zone"], + 'region': region, + 'attachment_set': { + 'attach_time': attachment[0]["attach_time"] if len(attachment) > 0 else None, + 'device': attachment[0]["device"] if len(attachment) > 0 else None, + 'instance_id': attachment[0]["instance_id"] if len(attachment) > 0 else None, + 'status': attachment[0]["state"] if len(attachment) > 0 else None, + 'delete_on_termination': attachment[0]["delete_on_termination"] if len(attachment) > 0 else None + }, + 'tags': boto3_tag_list_to_ansible_dict(volume['tags']) if "tags" in volume else None + } + + return volume_info + + +@AWSRetry.jittered_backoff() +def describe_volumes_with_backoff(connection, filters): + paginator = connection.get_paginator('describe_volumes') + return paginator.paginate(Filters=filters).build_full_result() + + +def list_ec2_volumes(connection, module): + + # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags + sanitized_filters = module.params.get("filters") + for key in list(sanitized_filters): + if not key.startswith("tag:"): + sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key) + volume_dict_array = [] + + try: + all_volumes = describe_volumes_with_backoff(connection, ansible_dict_to_boto3_filter_list(sanitized_filters)) + except ClientError as e: + module.fail_json_aws(e, msg="Failed to describe volumes.") + + for volume in all_volumes["Volumes"]: + volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags']) + volume_dict_array.append(get_volume_info(volume, module.region)) + module.exit_json(volumes=volume_dict_array) + + +def main(): + argument_spec = dict(filters=dict(default={}, type='dict')) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'ec2_vol_facts': + module.deprecate("The 'ec2_vol_facts' module has been renamed to 'ec2_vol_info'", date='2021-12-01', collection_name='amazon.aws') + + connection = module.client('ec2') + + list_ec2_volumes(connection, module) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py new file mode 100644 index 00000000..fb6a6587 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- 
+module: ec2_vol_info +version_added: 1.0.0 +short_description: Gather information about ec2 volumes in AWS +description: + - Gather information about ec2 volumes in AWS. + - This module was called C(ec2_vol_facts) before Ansible 2.9. The usage did not change. +requirements: [ boto3 ] +author: "Rob White (@wimnat)" +options: + filters: + type: dict + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters. +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all volumes +- amazon.aws.ec2_vol_info: + +# Gather information about a particular volume using volume ID +- amazon.aws.ec2_vol_info: + filters: + volume-id: vol-00112233 + +# Gather information about any volume with a tag key Name and value Example +- amazon.aws.ec2_vol_info: + filters: + "tag:Name": Example + +# Gather information about any volume that is attached +- amazon.aws.ec2_vol_info: + filters: + attachment.status: attached + +''' + +RETURN = ''' +volumes: + description: Volumes that match the provided filters. Each element consists of a dict with all the information related to that volume. + type: list + elements: dict + returned: always + contains: + attachment_set: + description: Information about the volume attachments. + type: dict + sample: { + "attach_time": "2015-10-23T00:22:29.000Z", + "deleteOnTermination": "false", + "device": "/dev/sdf", + "instance_id": "i-8356263c", + "status": "attached" + } + create_time: + description: The time stamp when volume creation was initiated. + type: str + sample: "2015-10-21T14:36:08.870Z" + encrypted: + description: Indicates whether the volume is encrypted. + type: bool + sample: False + id: + description: The ID of the volume. + type: str + sample: "vol-35b333d9" + iops: + description: The number of I/O operations per second (IOPS) that the volume supports. + type: int + sample: null + size: + description: The size of the volume, in GiBs. + type: int + sample: 1 + snapshot_id: + description: The snapshot from which the volume was created, if applicable. + type: str + sample: "" + status: + description: The volume state. + type: str + sample: "in-use" + tags: + description: Any tags assigned to the volume. + type: dict + sample: { + env: "dev" + } + type: + description: The volume type. This can be gp2, io1, st1, sc1, or standard. + type: str + sample: "standard" + zone: + description: The Availability Zone of the volume. 
+ type: str + sample: "us-east-1b" +''' + +try: + from botocore.exceptions import ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def get_volume_info(volume, region): + + attachment = volume["attachments"] + + volume_info = { + 'create_time': volume["create_time"], + 'id': volume["volume_id"], + 'encrypted': volume["encrypted"], + 'iops': volume["iops"] if "iops" in volume else None, + 'size': volume["size"], + 'snapshot_id': volume["snapshot_id"], + 'status': volume["state"], + 'type': volume["volume_type"], + 'zone': volume["availability_zone"], + 'region': region, + 'attachment_set': { + 'attach_time': attachment[0]["attach_time"] if len(attachment) > 0 else None, + 'device': attachment[0]["device"] if len(attachment) > 0 else None, + 'instance_id': attachment[0]["instance_id"] if len(attachment) > 0 else None, + 'status': attachment[0]["state"] if len(attachment) > 0 else None, + 'delete_on_termination': attachment[0]["delete_on_termination"] if len(attachment) > 0 else None + }, + 'tags': boto3_tag_list_to_ansible_dict(volume['tags']) if "tags" in volume else None + } + + return volume_info + + +@AWSRetry.jittered_backoff() +def describe_volumes_with_backoff(connection, filters): + paginator = connection.get_paginator('describe_volumes') + return paginator.paginate(Filters=filters).build_full_result() + + +def list_ec2_volumes(connection, module): + + # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags + sanitized_filters = module.params.get("filters") + for key in list(sanitized_filters): + if not key.startswith("tag:"): + sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key) + volume_dict_array = [] + + try: + all_volumes = describe_volumes_with_backoff(connection, ansible_dict_to_boto3_filter_list(sanitized_filters)) + except ClientError as e: + module.fail_json_aws(e, msg="Failed to describe volumes.") + + for volume in all_volumes["Volumes"]: + volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags']) + volume_dict_array.append(get_volume_info(volume, module.region)) + module.exit_json(volumes=volume_dict_array) + + +def main(): + argument_spec = dict(filters=dict(default={}, type='dict')) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'ec2_vol_facts': + module.deprecate("The 'ec2_vol_facts' module has been renamed to 'ec2_vol_info'", date='2021-12-01', collection_name='amazon.aws') + + connection = module.client('ec2') + + list_ec2_volumes(connection, module) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py new file mode 100644 index 00000000..5cbb8e6b --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py @@ -0,0 +1,413 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + 
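+# Note: unlike the boto3-based modules in this collection, this module is still
+# implemented on top of the legacy 'boto' library (see the HAS_BOTO check in
+# main() below), which is why its error handling uses EC2ResponseError.
+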
+DOCUMENTATION = '''
+---
+module: ec2_vpc_dhcp_option
+version_added: 1.0.0
+short_description: Manages DHCP Options, and can ensure the DHCP options for the given VPC match what's
+ requested
+description:
+ - This module removes or creates DHCP option sets, and can associate them with a VPC.
+ Optionally, a new DHCP options set can be created that converges a VPC's existing
+ DHCP option set with the values provided.
+ When dhcp_options_id is provided, the module will
+ 1. remove it (with state='absent'),
+ 2. ensure tags are applied (if state='present' and tags are provided), or
+ 3. attach it to a VPC (if state='present' and a vpc_id is provided).
+ If any of the optional values are missing, they will be treated
+ as a no-op (i.e., inherit what already exists for the VPC).
+ To remove existing options while inheriting, supply an empty value
+ (e.g. set ntp_servers to [] if you want to remove them from the VPC's options).
+ Most of the options should be self-explanatory.
+author: "Joel Thompson (@joelthompson)"
+options:
+ domain_name:
+ description:
+ - The domain name to set in the DHCP option sets.
+ type: str
+ dns_servers:
+ description:
+ - A list of hosts to set as the DNS servers for the VPC. (Should be a
+ list of IP addresses rather than host names.)
+ type: list
+ elements: str
+ ntp_servers:
+ description:
+ - List of hosts to advertise as NTP servers for the VPC.
+ type: list
+ elements: str
+ netbios_name_servers:
+ description:
+ - List of hosts to advertise as NetBIOS servers.
+ type: list
+ elements: str
+ netbios_node_type:
+ description:
+ - NetBIOS node type to advertise in the DHCP options.
+ The AWS recommendation is to use 2 (when using NetBIOS name services).
+ U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
+ type: int
+ vpc_id:
+ description:
+ - VPC ID to associate with the requested DHCP option set.
+ If no VPC ID is provided, and no matching option set is found, then a new
+ DHCP option set is created.
+ type: str
+ delete_old:
+ description:
+ - Whether to delete the old VPC DHCP option set when associating a new one.
+ This is primarily useful for debugging/development purposes when you
+ want to quickly roll back to the old option set. Note that this setting
+ will be ignored, and the old DHCP option set will be preserved, if it
+ is in use by any other VPC. (Otherwise, AWS will return an error.)
+ type: bool
+ default: 'yes'
+ inherit_existing:
+ description:
+ - For any DHCP options not specified in these parameters, whether to
+ inherit them from the option set already applied to vpc_id, or to
+ reset them to be empty.
+ type: bool
+ default: 'no'
+ tags:
+ description:
+ - Tags to be applied to a VPC options set if a new one is created, or
+ if the resource_id is provided. (The options must match.)
+ aliases: [ 'resource_tags']
+ type: dict
+ dhcp_options_id:
+ description:
+ - The resource_id of an existing DHCP options set.
+ If this is specified, then it will override other settings, except tags
+ (which will be updated to match).
+ type: str
+ state:
+ description:
+ - Create/assign or remove the DHCP options.
+ If state is set to absent, then a DHCP options set matched either
+ by id, or by tags and options, will be removed if possible.
+ default: present
+ choices: [ 'absent', 'present' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto
+'''
+
+RETURN = """
+new_options:
+ description: The DHCP options created, associated or found.
+ returned: when appropriate
+ type: dict
+ sample:
+ domain-name-servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios-name-servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios-node-type: 2
+ domain-name: "my.example.com"
+dhcp_options_id:
+ description: The AWS resource ID of the primary DHCP options set created, found or removed.
+ type: str
+ returned: when available
+changed:
+ description: Whether the DHCP options were changed.
+ type: bool
+ returned: always
+"""
+
+EXAMPLES = """
+# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
+# DHCP option set that may have been attached to that VPC.
+- amazon.aws.ec2_vpc_dhcp_option:
+ domain_name: "foo.example.com"
+ region: us-east-1
+ dns_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ vpc_id: vpc-123456
+ delete_old: True
+ inherit_existing: False
+
+
+# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
+# keep any other existing settings. Also, keep the old DHCP option set around.
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dns_servers:
+ - "{{groups['dns-primary']}}"
+ - "{{groups['dns-secondary']}}"
+ vpc_id: vpc-123456
+ inherit_existing: True
+ delete_old: False
+
+
+## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
+## but do not assign to a VPC
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: google servers
+ Environment: Test
+
+## Delete a DHCP options set that matches the tags and options specified
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: google servers
+ Environment: Test
+ state: absent
+
+## Associate a DHCP options set with a VPC by ID
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dhcp_options_id: dopt-12345678
+ vpc_id: vpc-123456
+
+"""
+
+import collections
+from time import sleep, time
+
+try:
+ import boto.vpc
+ import boto.ec2
+ from boto.exception import EC2ResponseError
+except ImportError:
+ pass # Taken care of by ec2.HAS_BOTO
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import HAS_BOTO
+from ..module_utils.ec2 import connect_to_aws
+from ..module_utils.ec2 import get_aws_connection_info
+
+
+def get_resource_tags(vpc_conn, resource_id):
+ return dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
+
+
+def retry_not_found(to_call, *args, **kwargs):
+ start_time = time()
+ while time() < start_time + 300:
+ try:
+ return to_call(*args, **kwargs)
+ except EC2ResponseError as e:
+ if e.error_code in ['InvalidDhcpOptionID.NotFound', 'InvalidDhcpOptionsID.NotFound']:
+ sleep(3)
+ continue
+ raise e
+
+
+def ensure_tags(module, vpc_conn, resource_id, tags, add_only, check_mode):
+ try:
+ cur_tags = get_resource_tags(vpc_conn, resource_id)
+ if tags == cur_tags:
+ return {'changed': False, 'tags': cur_tags}
+
+ to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
+ if to_delete and not add_only:
+ retry_not_found(vpc_conn.delete_tags, resource_id, to_delete, dry_run=check_mode)
+
+ to_add = dict((k, tags[k])
for k in tags if k not in cur_tags) + if to_add: + retry_not_found(vpc_conn.create_tags, resource_id, to_add, dry_run=check_mode) + + latest_tags = get_resource_tags(vpc_conn, resource_id) + return {'changed': True, 'tags': latest_tags} + except EC2ResponseError as e: + module.fail_json_aws(e, msg='Failed to modify tags') + + +def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id): + """ + Returns the DHCP options object currently associated with the requested VPC ID using the VPC + connection variable. + """ + vpcs = vpc_conn.get_all_vpcs(vpc_ids=[vpc_id]) + if len(vpcs) != 1 or vpcs[0].dhcp_options_id == "default": + return None + dhcp_options = vpc_conn.get_all_dhcp_options(dhcp_options_ids=[vpcs[0].dhcp_options_id]) + if len(dhcp_options) != 1: + return None + return dhcp_options[0] + + +def match_dhcp_options(vpc_conn, tags=None, options=None): + """ + Finds a DHCP Options object that optionally matches the tags and options provided + """ + dhcp_options = vpc_conn.get_all_dhcp_options() + for dopts in dhcp_options: + if (not tags) or get_resource_tags(vpc_conn, dopts.id) == tags: + if (not options) or dopts.options == options: + return(True, dopts) + return(False, None) + + +def remove_dhcp_options_by_id(vpc_conn, dhcp_options_id): + associations = vpc_conn.get_all_vpcs(filters={'dhcpOptionsId': dhcp_options_id}) + if len(associations) > 0: + return False + else: + vpc_conn.delete_dhcp_options(dhcp_options_id) + return True + + +def main(): + argument_spec = dict( + dhcp_options_id=dict(type='str', default=None), + domain_name=dict(type='str', default=None), + dns_servers=dict(type='list', elements='str', default=None), + ntp_servers=dict(type='list', elements='str', default=None), + netbios_name_servers=dict(type='list', elements='str', default=None), + netbios_node_type=dict(type='int', default=None), + vpc_id=dict(type='str', default=None), + delete_old=dict(type='bool', default=True), + inherit_existing=dict(type='bool', default=False), + tags=dict(type='dict', default=None, aliases=['resource_tags']), + state=dict(type='str', default='present', choices=['present', 'absent']) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + check_boto3=False, + supports_check_mode=True + ) + + params = module.params + found = False + changed = False + new_options = collections.defaultdict(lambda: None) + + if not HAS_BOTO: + module.fail_json(msg='boto is required for this module') + + region, ec2_url, boto_params = get_aws_connection_info(module) + connection = connect_to_aws(boto.vpc, region, **boto_params) + + existing_options = None + + # First check if we were given a dhcp_options_id + if not params['dhcp_options_id']: + # No, so create new_options from the parameters + if params['dns_servers'] is not None: + new_options['domain-name-servers'] = params['dns_servers'] + if params['netbios_name_servers'] is not None: + new_options['netbios-name-servers'] = params['netbios_name_servers'] + if params['ntp_servers'] is not None: + new_options['ntp-servers'] = params['ntp_servers'] + if params['domain_name'] is not None: + # needs to be a list for comparison with boto objects later + new_options['domain-name'] = [params['domain_name']] + if params['netbios_node_type'] is not None: + # needs to be a list for comparison with boto objects later + new_options['netbios-node-type'] = [str(params['netbios_node_type'])] + # If we were given a vpc_id then we need to look at the options on that + if params['vpc_id']: + existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id']) + 
# if we've been asked to inherit existing options, do that now + if params['inherit_existing']: + if existing_options: + for option in ['domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']: + if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]): + new_options[option] = existing_options.options.get(option) + + # Do the vpc's dhcp options already match what we're asked for? if so we are done + if existing_options and new_options == existing_options.options: + module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=existing_options.id) + + # If no vpc_id was given, or the options don't match then look for an existing set using tags + found, dhcp_option = match_dhcp_options(connection, params['tags'], new_options) + + # Now let's cover the case where there are existing options that we were told about by id + # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given) + else: + supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id': params['dhcp_options_id']}) + if len(supplied_options) != 1: + if params['state'] != 'absent': + module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist") + else: + found = True + dhcp_option = supplied_options[0] + if params['state'] != 'absent' and params['tags']: + ensure_tags(module, connection, dhcp_option.id, params['tags'], False, module.check_mode) + + # Now we have the dhcp options set, let's do the necessary + + # if we found options we were asked to remove then try to do so + if params['state'] == 'absent': + if not module.check_mode: + if found: + changed = remove_dhcp_options_by_id(connection, dhcp_option.id) + module.exit_json(changed=changed, new_options={}) + + # otherwise if we haven't found the required options we have something to do + elif not module.check_mode and not found: + + # create some dhcp options if we weren't able to use existing ones + if not found: + # Convert netbios-node-type and domain-name back to strings + if new_options['netbios-node-type']: + new_options['netbios-node-type'] = new_options['netbios-node-type'][0] + if new_options['domain-name']: + new_options['domain-name'] = new_options['domain-name'][0] + + # create the new dhcp options set requested + dhcp_option = connection.create_dhcp_options( + new_options['domain-name'], + new_options['domain-name-servers'], + new_options['ntp-servers'], + new_options['netbios-name-servers'], + new_options['netbios-node-type']) + + # wait for dhcp option to be accessible + found_dhcp_opt = False + start_time = time() + try: + found_dhcp_opt = retry_not_found(connection.get_all_dhcp_options, dhcp_options_ids=[dhcp_option.id]) + except EC2ResponseError as e: + module.fail_json_aws(e, msg="Failed to describe DHCP options") + if not found_dhcp_opt: + module.fail_json(msg="Failed to wait for {0} to be available.".format(dhcp_option.id)) + + changed = True + if params['tags']: + ensure_tags(module, connection, dhcp_option.id, params['tags'], False, module.check_mode) + + # If we were given a vpc_id, then attach the options we now have to that before we finish + if params['vpc_id'] and not module.check_mode: + changed = True + connection.associate_dhcp_options(dhcp_option.id, params['vpc_id']) + # and remove old ones if that was requested + if params['delete_old'] and existing_options: + remove_dhcp_options_by_id(connection, existing_options.id) + + 
module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id)
+
+
+if __name__ == "__main__":
+ main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_facts.py new file mode 100644 index 00000000..f82f8b3f --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_facts.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_dhcp_option_info
+version_added: 1.0.0
+short_description: Gather information about DHCP option sets in AWS
+description:
+ - Gather information about DHCP option sets in AWS.
+ - This module was called C(ec2_vpc_dhcp_option_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Nick Aslanidis (@naslanidis)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
+ type: dict
+ dhcp_options_ids:
+ description:
+ - Get details of specific DHCP Option IDs.
+ aliases: ['DhcpOptionIds']
+ type: list
+ elements: str
+ dry_run:
+ description:
+ - Checks whether you have the required permissions to view the DHCP
+ options.
+ aliases: ['DryRun']
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
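+
+# A hypothetical extra example (not part of the original docs): filter on a
+# DHCP configuration key documented in the DescribeDhcpOptions API reference.
+- name: Gather information about DHCP Option sets that configure a domain name
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ filters:
+ key: domain-name
+ register: dhcp_info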
+
+- name: Gather information about all DHCP Option sets for an account or profile
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ register: dhcp_info
+
+- name: Gather information about a filtered list of DHCP Option sets
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "abc-123"
+ register: dhcp_info
+
+- name: Gather information about a specific DHCP Option set by DhcpOptionsId
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ dhcp_options_ids:
+ - dopt-123fece2
+ register: dhcp_info
+
+'''
+
+RETURN = '''
+dhcp_options:
+ description: The DHCP option sets for the account.
+ returned: always
+ type: list
+
+changed:
+ description: True if listing the DHCP options succeeds.
+ type: bool
+ returned: always
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def get_dhcp_options_info(dhcp_option):
+ dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
+ 'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
+ 'Tags': boto3_tag_list_to_ansible_dict(dhcp_option.get('Tags', [{'Value': '', 'Key': 'Name'}]))}
+ return dhcp_option_info
+
+
+def list_dhcp_options(client, module):
+ params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters')))
+
+ if module.params.get("dry_run"):
+ params['DryRun'] = True
+
+ if module.params.get("dhcp_options_ids"):
+ params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids")
+
+ try:
+ all_dhcp_options = client.describe_dhcp_options(aws_retry=True, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ return [camel_dict_to_snake_dict(get_dhcp_options_info(option))
+ for option in all_dhcp_options['DhcpOptions']]
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict', default={}),
+ dry_run=dict(type='bool', default=False, aliases=['DryRun']),
+ dhcp_options_ids=dict(type='list', elements='str', aliases=['DhcpOptionIds'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ if module._name == 'ec2_vpc_dhcp_option_facts':
+ module.deprecate("The 'ec2_vpc_dhcp_option_facts' module has been renamed to 'ec2_vpc_dhcp_option_info'",
+ date='2021-12-01', collection_name='amazon.aws')
+
+ client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Gather the requested DHCP option set information
+ results = list_dhcp_options(client, module)
+
+ module.exit_json(dhcp_options=results)
+
+
+if __name__ == '__main__':
+ main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py new file mode 100644 index 00000000..f82f8b3f --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION
= '''
+---
+module: ec2_vpc_dhcp_option_info
+version_added: 1.0.0
+short_description: Gather information about DHCP option sets in AWS
+description:
+ - Gather information about DHCP option sets in AWS.
+ - This module was called C(ec2_vpc_dhcp_option_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Nick Aslanidis (@naslanidis)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
+ type: dict
+ dhcp_options_ids:
+ description:
+ - Get details of specific DHCP Option IDs.
+ aliases: ['DhcpOptionIds']
+ type: list
+ elements: str
+ dry_run:
+ description:
+ - Checks whether you have the required permissions to view the DHCP
+ options.
+ aliases: ['DryRun']
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all DHCP Option sets for an account or profile
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ register: dhcp_info
+
+- name: Gather information about a filtered list of DHCP Option sets
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "abc-123"
+ register: dhcp_info
+
+- name: Gather information about a specific DHCP Option set by DhcpOptionsId
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ dhcp_options_ids:
+ - dopt-123fece2
+ register: dhcp_info
+
+'''
+
+RETURN = '''
+dhcp_options:
+ description: The DHCP option sets for the account.
+ returned: always
+ type: list
+
+changed:
+ description: True if listing the DHCP options succeeds.
+ type: bool
+ returned: always
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def get_dhcp_options_info(dhcp_option):
+ dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
+ 'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
+ 'Tags': boto3_tag_list_to_ansible_dict(dhcp_option.get('Tags', [{'Value': '', 'Key': 'Name'}]))}
+ return dhcp_option_info
+
+
+def list_dhcp_options(client, module):
+ params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters')))
+
+ if module.params.get("dry_run"):
+ params['DryRun'] = True
+
+ if module.params.get("dhcp_options_ids"):
+ params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids")
+
+ try:
+ all_dhcp_options = client.describe_dhcp_options(aws_retry=True, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ return [camel_dict_to_snake_dict(get_dhcp_options_info(option))
+ for option in all_dhcp_options['DhcpOptions']]
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict', default={}),
+ dry_run=dict(type='bool', default=False, aliases=['DryRun']),
+ dhcp_options_ids=dict(type='list', elements='str', aliases=['DhcpOptionIds'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+
supports_check_mode=True + ) + if module._name == 'ec2_vpc_dhcp_option_facts': + module.deprecate("The 'ec2_vpc_dhcp_option_facts' module has been renamed to 'ec2_vpc_dhcp_option_info'", + date='2021-12-01', collection_name='amazon.aws') + + client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + # call your function here + results = list_dhcp_options(client, module) + + module.exit_json(dhcp_options=results) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py new file mode 100644 index 00000000..0d912031 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py @@ -0,0 +1,535 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_net +version_added: 1.0.0 +short_description: Configure AWS virtual private clouds +description: + - Create, modify, and terminate AWS virtual private clouds. +author: + - Jonathan Davila (@defionscode) + - Sloane Hertel (@s-hertel) +options: + name: + description: + - The name to give your VPC. This is used in combination with C(cidr_block) to determine if a VPC already exists. + required: yes + type: str + cidr_block: + description: + - The primary CIDR of the VPC. After 2.5 a list of CIDRs can be provided. The first in the list will be used as the primary CIDR + and is used in conjunction with the C(name) to ensure idempotence. + required: yes + type: list + elements: str + ipv6_cidr: + description: + - Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses, + or the size of the CIDR block. + default: False + type: bool + purge_cidrs: + description: + - Remove CIDRs that are associated with the VPC and are not specified in C(cidr_block). + default: no + type: bool + tenancy: + description: + - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created. + default: default + choices: [ 'default', 'dedicated' ] + type: str + dns_support: + description: + - Whether to enable AWS DNS support. + default: yes + type: bool + dns_hostnames: + description: + - Whether to enable AWS hostname support. + default: yes + type: bool + dhcp_opts_id: + description: + - The id of the DHCP options to use for this VPC. + type: str + tags: + description: + - The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of + the VPC if it's different. + aliases: [ 'resource_tags' ] + type: dict + state: + description: + - The state of the VPC. Either absent or present. + default: present + choices: [ 'present', 'absent' ] + type: str + multi_ok: + description: + - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want + duplicate VPCs created. + type: bool + default: false +requirements: + - boto3 + - botocore +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
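+
+# A hypothetical extra example (not part of the original docs): cidr_block
+# accepts a list, so additional CIDRs can be associated, and purge_cidrs
+# removes any associated CIDRs that are not listed.
+- name: create a VPC with a second CIDR block, removing any other associated CIDRs
+ amazon.aws.ec2_vpc_net:
+ name: Module_dev2
+ cidr_block:
+ - 10.10.0.0/16
+ - 10.20.0.0/16
+ purge_cidrs: yes
+ region: us-east-1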
+ +- name: create a VPC with dedicated tenancy and a couple of tags + amazon.aws.ec2_vpc_net: + name: Module_dev2 + cidr_block: 10.10.0.0/16 + region: us-east-1 + tags: + module: ec2_vpc_net + this: works + tenancy: dedicated + +- name: create a VPC with dedicated tenancy and request an IPv6 CIDR + amazon.aws.ec2_vpc_net: + name: Module_dev2 + cidr_block: 10.10.0.0/16 + ipv6_cidr: True + region: us-east-1 + tenancy: dedicated +''' + +RETURN = ''' +vpc: + description: info about the VPC that was created or deleted + returned: always + type: complex + contains: + cidr_block: + description: The CIDR of the VPC + returned: always + type: str + sample: 10.0.0.0/16 + cidr_block_association_set: + description: IPv4 CIDR blocks associated with the VPC + returned: success + type: list + sample: + "cidr_block_association_set": [ + { + "association_id": "vpc-cidr-assoc-97aeeefd", + "cidr_block": "10.0.0.0/24", + "cidr_block_state": { + "state": "associated" + } + } + ] + classic_link_enabled: + description: indicates whether ClassicLink is enabled + returned: always + type: bool + sample: false + dhcp_options_id: + description: the id of the DHCP options associated with this VPC + returned: always + type: str + sample: dopt-12345678 + id: + description: VPC resource id + returned: always + type: str + sample: vpc-12345678 + instance_tenancy: + description: indicates whether VPC uses default or dedicated tenancy + returned: always + type: str + sample: default + ipv6_cidr_block_association_set: + description: IPv6 CIDR blocks associated with the VPC + returned: success + type: list + sample: + "ipv6_cidr_block_association_set": [ + { + "association_id": "vpc-cidr-assoc-97aeeefd", + "ipv6_cidr_block": "2001:db8::/56", + "ipv6_cidr_block_state": { + "state": "associated" + } + } + ] + is_default: + description: indicates whether this is the default VPC + returned: always + type: bool + sample: false + state: + description: state of the VPC + returned: always + type: str + sample: available + tags: + description: tags attached to the VPC, includes name + returned: always + type: complex + contains: + Name: + description: name tag for the VPC + returned: always + type: str + sample: pk_vpc4 + owner_id: + description: The AWS account which owns the VPC. + returned: always + type: str + sample: 123456789012 +''' + +from time import sleep +from time import time + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_native +from ansible.module_utils.common.network import to_subnet +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.core import is_boto3_error_message +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ..module_utils.ec2 import compare_aws_tags + + +def vpc_exists(module, vpc, name, cidr_block, multi): + """Returns None or a vpc object depending on the existence of a VPC. When supplied + with a CIDR, it will check for matching tags to determine if it is a match + otherwise it will assume the VPC does not exist and thus return None. 
+ """ + try: + vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': cidr_block}) + matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs'] + # If an exact matching using a list of CIDRs isn't found, check for a match with the first CIDR as is documented for C(cidr_block) + if not matching_vpcs: + vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': [cidr_block[0]]}) + matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe VPCs") + + if multi: + return None + elif len(matching_vpcs) == 1: + return matching_vpcs[0]['VpcId'] + elif len(matching_vpcs) > 1: + module.fail_json(msg='Currently there are %d VPCs that have the same name and ' + 'CIDR block you specified. If you would like to create ' + 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs)) + return None + + +@AWSRetry.backoff(delay=3, tries=8, catch_extra_error_codes=['InvalidVpcID.NotFound']) +def get_classic_link_with_backoff(connection, vpc_id): + try: + return connection.describe_vpc_classic_link(VpcIds=[vpc_id])['Vpcs'][0].get('ClassicLinkEnabled') + except is_boto3_error_message('The functionality you requested is not available in this region.'): + return False + + +def get_vpc(module, connection, vpc_id): + # wait for vpc to be available + try: + connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id)) + + try: + vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe VPCs") + try: + vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe VPCs") + + return vpc_obj + + +def update_vpc_tags(connection, module, vpc_id, tags, name): + if tags is None: + tags = dict() + + tags.update({'Name': name}) + tags = dict((k, to_native(v)) for k, v in tags.items()) + try: + filters = ansible_dict_to_boto3_filter_list({'resource-id': vpc_id}) + current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=filters, aws_retry=True)['Tags']) + tags_to_update, dummy = compare_aws_tags(current_tags, tags, False) + if tags_to_update: + if not module.check_mode: + tags = ansible_dict_to_boto3_tag_list(tags_to_update) + vpc_obj = connection.create_tags(Resources=[vpc_id], Tags=tags, aws_retry=True) + + # Wait for tags to be updated + expected_tags = boto3_tag_list_to_ansible_dict(tags) + filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()] + connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters) + + return True + else: + return False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update tags") + + +def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): + if vpc_obj['DhcpOptionsId'] != dhcp_id: + if not module.check_mode: + try: + connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'], aws_retry=True) + 
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id))
+
+ try:
+ # Wait for DhcpOptionsId to be updated
+ filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}]
+ connection.get_waiter('vpc_available').wait(VpcIds=[vpc_obj['VpcId']], Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to wait for DhcpOptionsId to be updated")
+
+ return True
+ else:
+ return False
+
+
+def create_vpc(connection, module, cidr_block, tenancy):
+ try:
+ if not module.check_mode:
+ vpc_obj = connection.create_vpc(CidrBlock=cidr_block, InstanceTenancy=tenancy, aws_retry=True)
+ else:
+ module.exit_json(changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to create the VPC")
+
+ # wait up to 30 seconds for vpc to exist
+ try:
+ connection.get_waiter('vpc_exists').wait(
+ VpcIds=[vpc_obj['Vpc']['VpcId']],
+ WaiterConfig=dict(MaxAttempts=30)
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be created.".format(vpc_obj['Vpc']['VpcId']))
+
+ return vpc_obj['Vpc']['VpcId']
+
+
+def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
+ start_time = time()
+ updated = False
+ while time() < start_time + 300:
+ current_value = connection.describe_vpc_attribute(
+ Attribute=attribute,
+ VpcId=vpc_id,
+ aws_retry=True
+ )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value']
+ if current_value != expected_value:
+ sleep(3)
+ else:
+ updated = True
+ break
+ if not updated:
+ module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
+
+
+def get_cidr_network_bits(module, cidr_block):
+ fixed_cidrs = []
+ for cidr in cidr_block:
+ split_addr = cidr.split('/')
+ if len(split_addr) == 2:
+ # this is an IPv4 CIDR that may or may not have host bits set
+ # Get the network bits.
+ valid_cidr = to_subnet(split_addr[0], split_addr[1])
+ if cidr != valid_cidr:
+ module.warn("One of your CIDR addresses ({0}) has host bits set. 
To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr))
+ fixed_cidrs.append(valid_cidr)
+ else:
+ # let AWS handle invalid CIDRs
+ fixed_cidrs.append(cidr)
+ return fixed_cidrs
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ cidr_block=dict(type='list', required=True, elements='str'),
+ ipv6_cidr=dict(type='bool', default=False),
+ tenancy=dict(choices=['default', 'dedicated'], default='default'),
+ dns_support=dict(type='bool', default=True),
+ dns_hostnames=dict(type='bool', default=True),
+ dhcp_opts_id=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ multi_ok=dict(type='bool', default=False),
+ purge_cidrs=dict(type='bool', default=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params.get('name')
+ cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
+ ipv6_cidr = module.params.get('ipv6_cidr')
+ purge_cidrs = module.params.get('purge_cidrs')
+ tenancy = module.params.get('tenancy')
+ dns_support = module.params.get('dns_support')
+ dns_hostnames = module.params.get('dns_hostnames')
+ dhcp_id = module.params.get('dhcp_opts_id')
+ tags = module.params.get('tags')
+ state = module.params.get('state')
+ multi = module.params.get('multi_ok')
+
+ changed = False
+
+ connection = module.client(
+ 'ec2',
+ retry_decorator=AWSRetry.jittered_backoff(
+ retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
+ )
+ )
+
+ if dns_hostnames and not dns_support:
+ module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
+
+ if state == 'present':
+
+ # Check if VPC exists
+ vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
+
+ if vpc_id is None:
+ vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
+ changed = True
+
+ vpc_obj = get_vpc(module, connection, vpc_id)
+
+ associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
+ if cidr['CidrBlockState']['State'] != 'disassociated')
+ to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
+ to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in cidr_block]
+ expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add
+
+ if len(cidr_block) > 1:
+ for cidr in to_add:
+ changed = True
+ try:
+ connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(cidr))
+ if ipv6_cidr:
+ if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
+ module.warn("Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}".format(
+ vpc_id,
+ vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
+ else:
+ try:
+ connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id, aws_retry=True)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
+
+ if purge_cidrs:
+ for association_id in to_remove:
+ changed = True
+ try:
+ connection.disassociate_vpc_cidr_block(AssociationId=association_id, aws_retry=True)
+ except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
+ "are associated with the CIDR block before you can disassociate it.".format(association_id))
+
+ if dhcp_id is not None:
+ try:
+ if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update DHCP options")
+
+ if tags is not None or name is not None:
+ try:
+ if update_vpc_tags(connection, module, vpc_id, tags, name):
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update tags")
+
+ current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
+ current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
+ if current_dns_enabled != dns_support:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support}, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update enabled dns support attribute")
+ if current_dns_hostnames != dns_hostnames:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames}, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")
+
+ # wait for associated cidrs to match
+ if to_add or to_remove:
+ try:
+ connection.get_waiter('vpc_available').wait(
+ VpcIds=[vpc_id],
+ Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to wait for CIDRs to update", vpc_id=vpc_id)
+
+ # try to wait for enableDnsSupport and enableDnsHostnames to match
+ wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
+ wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)
+
+ final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
+ final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', []))
+ final_state['id'] = final_state.pop('vpc_id')
+ debugging = dict(to_add=to_add, to_remove=to_remove, expected_cidrs=expected_cidrs)
+
+ module.exit_json(changed=changed, vpc=final_state, debugging=debugging)
+
+ elif state == 'absent':
+
+ # Check if VPC exists
+ vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
+
+ if vpc_id is not None:
+ try:
+ if not module.check_mode:
+ connection.delete_vpc(VpcId=vpc_id, aws_retry=True)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete VPC {0}. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
+ "and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id))
+
+ module.exit_json(changed=changed, vpc={})
+
+
+if __name__ == '__main__':
+ main() diff --git 
a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_facts.py new file mode 100644 index 00000000..62a9b1ee --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_facts.py @@ -0,0 +1,268 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_net_info +version_added: 1.0.0 +short_description: Gather information about ec2 VPCs in AWS +description: + - Gather information about ec2 VPCs in AWS + - This module was called C(ec2_vpc_net_facts) before Ansible 2.9. The usage did not change. +author: "Rob White (@wimnat)" +requirements: + - boto3 + - botocore +options: + vpc_ids: + description: + - A list of VPC IDs that exist in your account. + type: list + elements: str + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters. + type: dict +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all VPCs +- amazon.aws.ec2_vpc_net_info: + +# Gather information about a particular VPC using VPC ID +- amazon.aws.ec2_vpc_net_info: + vpc_ids: vpc-00112233 + +# Gather information about any VPC with a tag key Name and value Example +- amazon.aws.ec2_vpc_net_info: + filters: + "tag:Name": Example + +''' + +RETURN = ''' +vpcs: + description: Returns an array of complex objects as described below. + returned: success + type: complex + contains: + id: + description: The ID of the VPC (for backwards compatibility). + returned: always + type: str + vpc_id: + description: The ID of the VPC . + returned: always + type: str + state: + description: The state of the VPC. + returned: always + type: str + tags: + description: A dict of tags associated with the VPC. + returned: always + type: dict + instance_tenancy: + description: The instance tenancy setting for the VPC. + returned: always + type: str + is_default: + description: True if this is the default VPC for account. + returned: always + type: bool + cidr_block: + description: The IPv4 CIDR block assigned to the VPC. + returned: always + type: str + classic_link_dns_supported: + description: True/False depending on attribute setting for classic link DNS support. + returned: always + type: bool + classic_link_enabled: + description: True/False depending on if classic link support is enabled. + returned: always + type: bool + enable_dns_hostnames: + description: True/False depending on attribute setting for DNS hostnames support. + returned: always + type: bool + enable_dns_support: + description: True/False depending on attribute setting for DNS support. + returned: always + type: bool + cidr_block_association_set: + description: An array of IPv4 cidr block association set information. + returned: always + type: complex + contains: + association_id: + description: The association ID + returned: always + type: str + cidr_block: + description: The IPv4 CIDR block that is associated with the VPC. 
+ returned: always + type: str + cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. + returned: always + type: str + ipv6_cidr_block_association_set: + description: An array of IPv6 cidr block association set information. + returned: always + type: complex + contains: + association_id: + description: The association ID + returned: always + type: str + ipv6_cidr_block: + description: The IPv6 CIDR block that is associated with the VPC. + returned: always + type: str + ipv6_cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. + returned: always + type: str + owner_id: + description: The AWS account which owns the VPC. + returned: always + type: str + sample: 123456789012 + dhcp_options_id: + description: The ID of the DHCP options associated with this VPC. + returned: always + type: str + sample: dopt-12345678 +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.core import is_boto3_error_code +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def describe_vpcs(connection, module): + """ + Describe VPCs. + + connection : boto3 client connection object + module : AnsibleAWSModule object + """ + # collect parameters + filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + vpc_ids = module.params.get('vpc_ids') + + # init empty list for return vars + vpc_info = list() + vpc_list = list() + + # Get the basic VPC info + try: + response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters, aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to describe VPCs {0}".format(vpc_ids)) + + # Loop through results and create a list of VPC IDs + for vpc in response['Vpcs']: + vpc_list.append(vpc['VpcId']) + + # We can get these results in bulk but still needs two separate calls to the API + try: + cl_enabled = connection.describe_vpc_classic_link(VpcIds=vpc_list, aws_retry=True) + except is_boto3_error_code('UnsupportedOperation'): + cl_enabled = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkEnabled': False} for vpc_id in vpc_list]} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Unable to describe if ClassicLink is enabled') + + try: + cl_dns_support = connection.describe_vpc_classic_link_dns_support(VpcIds=vpc_list, aws_retry=True) + except is_boto3_error_code('UnsupportedOperation'): + cl_dns_support = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkDnsSupported': False} for vpc_id in vpc_list]} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Unable to describe if ClassicLinkDns is supported') + + # Loop through the results and add the other VPC attributes we gathered + for vpc in response['Vpcs']: + error_message = "Unable to describe VPC attribute {0}" + # We have 
to make two separate calls per VPC to get these attributes. + try: + dns_support = connection.describe_vpc_attribute(VpcId=vpc['VpcId'], + Attribute='enableDnsSupport', aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg=error_message.format('enableDnsSupport')) + try: + dns_hostnames = connection.describe_vpc_attribute(VpcId=vpc['VpcId'], + Attribute='enableDnsHostnames', aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg=error_message.format('enableDnsHostnames')) + + # loop through the ClassicLink Enabled results and add the value for the correct VPC + for item in cl_enabled['Vpcs']: + if vpc['VpcId'] == item['VpcId']: + vpc['ClassicLinkEnabled'] = item['ClassicLinkEnabled'] + + # loop through the ClassicLink DNS support results and add the value for the correct VPC + for item in cl_dns_support['Vpcs']: + if vpc['VpcId'] == item['VpcId']: + vpc['ClassicLinkDnsSupported'] = item['ClassicLinkDnsSupported'] + + # add the two DNS attributes + vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value') + vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value') + # for backwards compatibility + vpc['id'] = vpc['VpcId'] + vpc_info.append(camel_dict_to_snake_dict(vpc)) + # convert tag list to ansible dict + vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', [])) + + module.exit_json(vpcs=vpc_info) + + +def main(): + argument_spec = dict( + vpc_ids=dict(type='list', elements='str', default=[]), + filters=dict(type='dict', default={}) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'ec2_vpc_net_facts': + module.deprecate("The 'ec2_vpc_net_facts' module has been renamed to 'ec2_vpc_net_info'", date='2021-12-01', collection_name='amazon.aws') + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + + describe_vpcs(connection, module) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py new file mode 100644 index 00000000..62a9b1ee --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py @@ -0,0 +1,268 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_net_info +version_added: 1.0.0 +short_description: Gather information about ec2 VPCs in AWS +description: + - Gather information about ec2 VPCs in AWS + - This module was called C(ec2_vpc_net_facts) before Ansible 2.9. The usage did not change. +author: "Rob White (@wimnat)" +requirements: + - boto3 + - botocore +options: + vpc_ids: + description: + - A list of VPC IDs that exist in your account. + type: list + elements: str + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters. 
+ type: dict +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all VPCs +- amazon.aws.ec2_vpc_net_info: + +# Gather information about a particular VPC using VPC ID +- amazon.aws.ec2_vpc_net_info: + vpc_ids: vpc-00112233 + +# Gather information about any VPC with a tag key Name and value Example +- amazon.aws.ec2_vpc_net_info: + filters: + "tag:Name": Example + +''' + +RETURN = ''' +vpcs: + description: Returns an array of complex objects as described below. + returned: success + type: complex + contains: + id: + description: The ID of the VPC (for backwards compatibility). + returned: always + type: str + vpc_id: + description: The ID of the VPC . + returned: always + type: str + state: + description: The state of the VPC. + returned: always + type: str + tags: + description: A dict of tags associated with the VPC. + returned: always + type: dict + instance_tenancy: + description: The instance tenancy setting for the VPC. + returned: always + type: str + is_default: + description: True if this is the default VPC for account. + returned: always + type: bool + cidr_block: + description: The IPv4 CIDR block assigned to the VPC. + returned: always + type: str + classic_link_dns_supported: + description: True/False depending on attribute setting for classic link DNS support. + returned: always + type: bool + classic_link_enabled: + description: True/False depending on if classic link support is enabled. + returned: always + type: bool + enable_dns_hostnames: + description: True/False depending on attribute setting for DNS hostnames support. + returned: always + type: bool + enable_dns_support: + description: True/False depending on attribute setting for DNS support. + returned: always + type: bool + cidr_block_association_set: + description: An array of IPv4 cidr block association set information. + returned: always + type: complex + contains: + association_id: + description: The association ID + returned: always + type: str + cidr_block: + description: The IPv4 CIDR block that is associated with the VPC. + returned: always + type: str + cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. + returned: always + type: str + ipv6_cidr_block_association_set: + description: An array of IPv6 cidr block association set information. + returned: always + type: complex + contains: + association_id: + description: The association ID + returned: always + type: str + ipv6_cidr_block: + description: The IPv6 CIDR block that is associated with the VPC. + returned: always + type: str + ipv6_cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. + returned: always + type: str + owner_id: + description: The AWS account which owns the VPC. + returned: always + type: str + sample: 123456789012 + dhcp_options_id: + description: The ID of the DHCP options associated with this VPC. 
+ returned: always + type: str + sample: dopt-12345678 +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.core import is_boto3_error_code +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def describe_vpcs(connection, module): + """ + Describe VPCs. + + connection : boto3 client connection object + module : AnsibleAWSModule object + """ + # collect parameters + filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + vpc_ids = module.params.get('vpc_ids') + + # init empty list for return vars + vpc_info = list() + vpc_list = list() + + # Get the basic VPC info + try: + response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters, aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to describe VPCs {0}".format(vpc_ids)) + + # Loop through results and create a list of VPC IDs + for vpc in response['Vpcs']: + vpc_list.append(vpc['VpcId']) + + # We can get these results in bulk but still needs two separate calls to the API + try: + cl_enabled = connection.describe_vpc_classic_link(VpcIds=vpc_list, aws_retry=True) + except is_boto3_error_code('UnsupportedOperation'): + cl_enabled = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkEnabled': False} for vpc_id in vpc_list]} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Unable to describe if ClassicLink is enabled') + + try: + cl_dns_support = connection.describe_vpc_classic_link_dns_support(VpcIds=vpc_list, aws_retry=True) + except is_boto3_error_code('UnsupportedOperation'): + cl_dns_support = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkDnsSupported': False} for vpc_id in vpc_list]} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Unable to describe if ClassicLinkDns is supported') + + # Loop through the results and add the other VPC attributes we gathered + for vpc in response['Vpcs']: + error_message = "Unable to describe VPC attribute {0}" + # We have to make two separate calls per VPC to get these attributes. 
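+        # Note (illustrative): describe_vpc_attribute nests each flag under a
+        # 'Value' key, e.g. {'VpcId': 'vpc-123456', 'EnableDnsSupport': {'Value': True}},
+        # hence the ['EnableDnsSupport'].get('Value') lookups below.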
+ try: + dns_support = connection.describe_vpc_attribute(VpcId=vpc['VpcId'], + Attribute='enableDnsSupport', aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg=error_message.format('enableDnsSupport')) + try: + dns_hostnames = connection.describe_vpc_attribute(VpcId=vpc['VpcId'], + Attribute='enableDnsHostnames', aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg=error_message.format('enableDnsHostnames')) + + # loop through the ClassicLink Enabled results and add the value for the correct VPC + for item in cl_enabled['Vpcs']: + if vpc['VpcId'] == item['VpcId']: + vpc['ClassicLinkEnabled'] = item['ClassicLinkEnabled'] + + # loop through the ClassicLink DNS support results and add the value for the correct VPC + for item in cl_dns_support['Vpcs']: + if vpc['VpcId'] == item['VpcId']: + vpc['ClassicLinkDnsSupported'] = item['ClassicLinkDnsSupported'] + + # add the two DNS attributes + vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value') + vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value') + # for backwards compatibility + vpc['id'] = vpc['VpcId'] + vpc_info.append(camel_dict_to_snake_dict(vpc)) + # convert tag list to ansible dict + vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', [])) + + module.exit_json(vpcs=vpc_info) + + +def main(): + argument_spec = dict( + vpc_ids=dict(type='list', elements='str', default=[]), + filters=dict(type='dict', default={}) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'ec2_vpc_net_facts': + module.deprecate("The 'ec2_vpc_net_facts' module has been renamed to 'ec2_vpc_net_info'", date='2021-12-01', collection_name='amazon.aws') + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + + describe_vpcs(connection, module) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py new file mode 100644 index 00000000..d9b34a1b --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py @@ -0,0 +1,599 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_subnet +version_added: 1.0.0 +short_description: Manage subnets in AWS virtual private clouds +description: + - Manage subnets in AWS virtual private clouds. +author: +- Robert Estelle (@erydo) +- Brad Davidson (@brandond) +requirements: [ boto3 ] +options: + az: + description: + - "The availability zone for the subnet." + type: str + cidr: + description: + - "The CIDR block for the subnet. E.g. 192.0.2.0/24." + type: str + required: true + ipv6_cidr: + description: + - "The IPv6 CIDR block for the subnet. The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range." + - "Required if I(assign_instances_ipv6=true)" + type: str + tags: + description: + - "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed." 
+ aliases: [ 'resource_tags' ] + type: dict + state: + description: + - "Create or remove the subnet." + default: present + choices: [ 'present', 'absent' ] + type: str + vpc_id: + description: + - "VPC ID of the VPC in which to create or delete the subnet." + required: true + type: str + map_public: + description: + - "Specify C(yes) to indicate that instances launched into the subnet should be assigned public IP address by default." + type: bool + default: 'no' + assign_instances_ipv6: + description: + - "Specify C(yes) to indicate that instances launched into the subnet should be automatically assigned an IPv6 address." + type: bool + default: false + wait: + description: + - "When I(wait=true) and I(state=present), module will wait for subnet to be in available state before continuing." + type: bool + default: true + wait_timeout: + description: + - "Number of seconds to wait for subnet to become available I(wait=True)." + default: 300 + type: int + purge_tags: + description: + - Whether or not to remove tags that do not appear in the I(tags) list. + type: bool + default: true +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create subnet for database servers + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.0.1.16/28 + tags: + Name: Database Subnet + register: database_subnet + +- name: Remove subnet for database servers + amazon.aws.ec2_vpc_subnet: + state: absent + vpc_id: vpc-123456 + cidr: 10.0.1.16/28 + +- name: Create subnet with IPv6 block assigned + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.1.100.0/24 + ipv6_cidr: 2001:db8:0:102::/64 + +- name: Remove IPv6 block assigned to subnet + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.1.100.0/24 + ipv6_cidr: '' +''' + +RETURN = ''' +subnet: + description: Dictionary of subnet values + returned: I(state=present) + type: complex + contains: + id: + description: Subnet resource id + returned: I(state=present) + type: str + sample: subnet-b883b2c4 + cidr_block: + description: The IPv4 CIDR of the Subnet + returned: I(state=present) + type: str + sample: "10.0.0.0/16" + ipv6_cidr_block: + description: The IPv6 CIDR block actively associated with the Subnet + returned: I(state=present) + type: str + sample: "2001:db8:0:102::/64" + availability_zone: + description: Availability zone of the Subnet + returned: I(state=present) + type: str + sample: us-east-1a + state: + description: state of the Subnet + returned: I(state=present) + type: str + sample: available + tags: + description: tags attached to the Subnet, includes name + returned: I(state=present) + type: dict + sample: {"Name": "My Subnet", "env": "staging"} + map_public_ip_on_launch: + description: whether public IP is auto-assigned to new instances + returned: I(state=present) + type: bool + sample: false + assign_ipv6_address_on_creation: + description: whether IPv6 address is auto-assigned to new instances + returned: I(state=present) + type: bool + sample: false + vpc_id: + description: the id of the VPC where this Subnet exists + returned: I(state=present) + type: str + sample: vpc-67236184 + available_ip_address_count: + description: number of available IPv4 addresses + returned: I(state=present) + type: str + sample: 251 + default_for_az: + description: indicates whether this is the default Subnet for this Availability Zone + returned: 
I(state=present) + type: bool + sample: false + ipv6_association_id: + description: The IPv6 association ID for the currently associated CIDR + returned: I(state=present) + type: str + sample: subnet-cidr-assoc-b85c74d2 + ipv6_cidr_block_association_set: + description: An array of IPv6 cidr block association set information. + returned: I(state=present) + type: complex + contains: + association_id: + description: The association ID + returned: always + type: str + ipv6_cidr_block: + description: The IPv6 CIDR block that is associated with the subnet. + returned: always + type: str + ipv6_cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. + returned: always + type: str +''' + + +import time + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ..module_utils.ec2 import compare_aws_tags +from ..module_utils.waiters import get_waiter + + +def get_subnet_info(subnet): + if 'Subnets' in subnet: + return [get_subnet_info(s) for s in subnet['Subnets']] + elif 'Subnet' in subnet: + subnet = camel_dict_to_snake_dict(subnet['Subnet']) + else: + subnet = camel_dict_to_snake_dict(subnet) + + if 'tags' in subnet: + subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags']) + else: + subnet['tags'] = dict() + + if 'subnet_id' in subnet: + subnet['id'] = subnet['subnet_id'] + del subnet['subnet_id'] + + subnet['ipv6_cidr_block'] = '' + subnet['ipv6_association_id'] = '' + ipv6set = subnet.get('ipv6_cidr_block_association_set') + if ipv6set: + for item in ipv6set: + if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'): + subnet['ipv6_cidr_block'] = item['ipv6_cidr_block'] + subnet['ipv6_association_id'] = item['association_id'] + + return subnet + + +@AWSRetry.exponential_backoff() +def describe_subnets_with_backoff(client, **params): + return client.describe_subnets(**params) + + +def waiter_params(module, params, start_time): + if not module.botocore_at_least("1.7.0"): + remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time()) + params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5} + return params + + +def handle_waiter(conn, module, waiter_name, params, start_time): + try: + get_waiter(conn, waiter_name).wait( + **waiter_params(module, params, start_time) + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, "Failed to wait for updates to complete") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "An exception happened while trying to wait for updates") + + +def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, az=None, start_time=None): + wait = module.params['wait'] + wait_timeout = module.params['wait_timeout'] + + params = dict(VpcId=vpc_id, + CidrBlock=cidr) + + if ipv6_cidr: + params['Ipv6CidrBlock'] = ipv6_cidr + + if az: + params['AvailabilityZone'] = az + + try: + subnet = 
get_subnet_info(conn.create_subnet(**params))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't create subnet")
+
+    # Sometimes AWS takes its time to create a subnet, so using the new
+    # subnet's id to do things like create tags results in an exception.
+    if wait and subnet.get('state') != 'available':
+        handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
+        try:
+            conn.get_waiter('subnet_available').wait(
+                **waiter_params(module, {'SubnetIds': [subnet['id']]}, start_time)
+            )
+            subnet['state'] = 'available'
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, "Create subnet action timed out waiting for subnet to become available")
+
+    return subnet
+
+
+def ensure_tags(conn, module, subnet, tags, purge_tags, start_time):
+    changed = False
+
+    filters = ansible_dict_to_boto3_filter_list({'resource-id': subnet['id'], 'resource-type': 'subnet'})
+    try:
+        cur_tags = conn.describe_tags(Filters=filters)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't describe tags")
+
+    to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
+
+    if to_update:
+        try:
+            if not module.check_mode:
+                AWSRetry.exponential_backoff(
+                    catch_extra_error_codes=['InvalidSubnetID.NotFound']
+                )(conn.create_tags)(
+                    Resources=[subnet['id']],
+                    Tags=ansible_dict_to_boto3_tag_list(to_update)
+                )
+
+                changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't create tags")
+
+    if to_delete:
+        try:
+            if not module.check_mode:
+                tags_list = []
+                for key in to_delete:
+                    tags_list.append({'Key': key})
+
+                AWSRetry.exponential_backoff(
+                    catch_extra_error_codes=['InvalidSubnetID.NotFound']
+                )(conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list)
+
+                changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't delete tags")
+
+    if module.params['wait'] and not module.check_mode:
+        # Wait for tags to be updated
+        filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()]
+        handle_waiter(conn, module, 'subnet_exists',
+                      {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+    return changed
+
+
+def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time):
+    if check_mode:
+        return
+    try:
+        conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public})
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time):
+    if check_mode:
+        return
+    try:
+        conn.modify_subnet_attribute(SubnetId=subnet['id'], AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6})
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def disassociate_ipv6_cidr(conn, module, subnet, start_time):
+    if subnet.get('assign_ipv6_address_on_creation'):
+        ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time)
+
+    try:
+        conn.disassociate_subnet_cidr_block(AssociationId=subnet['ipv6_association_id'])
+    except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}" + .format(subnet['ipv6_association_id'], subnet['id'])) + + # Wait for cidr block to be disassociated + if module.params['wait']: + filters = ansible_dict_to_boto3_filter_list( + {'ipv6-cidr-block-association.state': ['disassociated'], + 'vpc-id': subnet['vpc_id']} + ) + handle_waiter(conn, module, 'subnet_exists', + {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + + +def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time): + wait = module.params['wait'] + changed = False + + if subnet['ipv6_association_id'] and not ipv6_cidr: + if not check_mode: + disassociate_ipv6_cidr(conn, module, subnet, start_time) + changed = True + + if ipv6_cidr: + filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr, + 'vpc-id': subnet['vpc_id']}) + + try: + check_subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get subnet info") + + if check_subnets and check_subnets[0]['ipv6_cidr_block']: + module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr)) + + if subnet['ipv6_association_id']: + if not check_mode: + disassociate_ipv6_cidr(conn, module, subnet, start_time) + changed = True + + try: + if not check_mode: + associate_resp = conn.associate_subnet_cidr_block(SubnetId=subnet['id'], Ipv6CidrBlock=ipv6_cidr) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id'])) + else: + if not check_mode and wait: + filters = ansible_dict_to_boto3_filter_list( + {'ipv6-cidr-block-association.state': ['associated'], + 'vpc-id': subnet['vpc_id']} + ) + handle_waiter(conn, module, 'subnet_exists', + {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + + if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'): + subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId'] + subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'] + if subnet['ipv6_cidr_block_association_set']: + subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']) + else: + subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])) + + return changed + + +def get_matching_subnet(conn, module, vpc_id, cidr): + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr}) + try: + subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get matching subnet") + + if subnets: + return subnets[0] + + return None + + +def ensure_subnet_present(conn, module): + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + changed = False + + # Initialize start so max time does not exceed the specified wait_timeout for multiple operations + start_time = time.time() + + if subnet is None: + if not module.check_mode: + subnet = create_subnet(conn, module, 
module.params['vpc_id'], module.params['cidr'], + ipv6_cidr=module.params['ipv6_cidr'], az=module.params['az'], start_time=start_time) + changed = True + # Subnet will be None when check_mode is true + if subnet is None: + return { + 'changed': changed, + 'subnet': {} + } + if module.params['wait']: + handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time) + + if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'): + if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time): + changed = True + + if module.params['map_public'] != subnet['map_public_ip_on_launch']: + ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time) + changed = True + + if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'): + ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time) + changed = True + + if module.params['tags'] != subnet['tags']: + stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items()) + if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time): + changed = True + + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + if not module.check_mode and module.params['wait']: + # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation + # so we only wait for those if necessary just before returning the subnet + subnet = ensure_final_subnet(conn, module, subnet, start_time) + + return { + 'changed': changed, + 'subnet': subnet + } + + +def ensure_final_subnet(conn, module, subnet, start_time): + for rewait in range(0, 30): + map_public_correct = False + assign_ipv6_correct = False + + if module.params['map_public'] == subnet['map_public_ip_on_launch']: + map_public_correct = True + else: + if module.params['map_public']: + handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time) + else: + handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time) + + if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'): + assign_ipv6_correct = True + else: + if module.params['assign_instances_ipv6']: + handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + else: + handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + + if map_public_correct and assign_ipv6_correct: + break + + time.sleep(5) + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + + return subnet + + +def ensure_subnet_absent(conn, module): + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + if subnet is None: + return {'changed': False} + + try: + if not module.check_mode: + conn.delete_subnet(SubnetId=subnet['id']) + if module.params['wait']: + handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time()) + return {'changed': True} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete subnet") + + +def main(): + argument_spec = dict( + az=dict(default=None, required=False), + cidr=dict(required=True), + ipv6_cidr=dict(default='', required=False), + state=dict(default='present', 
choices=['present', 'absent']), + tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']), + vpc_id=dict(required=True), + map_public=dict(default=False, required=False, type='bool'), + assign_instances_ipv6=dict(default=False, required=False, type='bool'), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=300, required=False), + purge_tags=dict(default=True, type='bool') + ) + + required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])] + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if) + + if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'): + module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string") + + if not module.botocore_at_least("1.7.0"): + module.warn("botocore >= 1.7.0 is required to use wait_timeout for custom wait times") + + connection = module.client('ec2') + + state = module.params.get('state') + + try: + if state == 'present': + result = ensure_subnet_present(connection, module) + elif state == 'absent': + result = ensure_subnet_absent(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_facts.py new file mode 100644 index 00000000..316d532e --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_facts.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_subnet_info +version_added: 1.0.0 +short_description: Gather information about ec2 VPC subnets in AWS +description: + - Gather information about ec2 VPC subnets in AWS + - This module was called C(ec2_vpc_subnet_facts) before Ansible 2.9. The usage did not change. +author: "Rob White (@wimnat)" +requirements: + - boto3 + - botocore +options: + subnet_ids: + description: + - A list of subnet IDs to gather information for. + aliases: ['subnet_id'] + type: list + elements: str + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters. + type: dict +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+ +# Gather information about all VPC subnets +- amazon.aws.ec2_vpc_subnet_info: + +# Gather information about a particular VPC subnet using ID +- amazon.aws.ec2_vpc_subnet_info: + subnet_ids: subnet-00112233 + +# Gather information about any VPC subnet with a tag key Name and value Example +- amazon.aws.ec2_vpc_subnet_info: + filters: + "tag:Name": Example + +# Gather information about any VPC subnet within VPC with ID vpc-abcdef00 +- amazon.aws.ec2_vpc_subnet_info: + filters: + vpc-id: vpc-abcdef00 + +# Gather information about a set of VPC subnets, publicA, publicB and publicC within a +# VPC with ID vpc-abcdef00 and then use the jinja map function to return the +# subnet_ids as a list. + +- amazon.aws.ec2_vpc_subnet_info: + filters: + vpc-id: vpc-abcdef00 + "tag:Name": "{{ item }}" + loop: + - publicA + - publicB + - publicC + register: subnet_info + +- set_fact: + subnet_ids: "{{ subnet_info.subnets|map(attribute='id')|list }}" +''' + +RETURN = ''' +subnets: + description: Returns an array of complex objects as described below. + returned: success + type: complex + contains: + subnet_id: + description: The ID of the Subnet. + returned: always + type: str + id: + description: The ID of the Subnet (for backwards compatibility). + returned: always + type: str + vpc_id: + description: The ID of the VPC . + returned: always + type: str + state: + description: The state of the subnet. + returned: always + type: str + tags: + description: A dict of tags associated with the Subnet. + returned: always + type: dict + map_public_ip_on_launch: + description: True/False depending on attribute setting for public IP mapping. + returned: always + type: bool + default_for_az: + description: True if this is the default subnet for AZ. + returned: always + type: bool + cidr_block: + description: The IPv4 CIDR block assigned to the subnet. + returned: always + type: str + available_ip_address_count: + description: Count of available IPs in subnet. + returned: always + type: str + availability_zone: + description: The availability zone where the subnet exists. + returned: always + type: str + assign_ipv6_address_on_creation: + description: True/False depending on attribute setting for IPv6 address assignment. + returned: always + type: bool + ipv6_cidr_block_association_set: + description: An array of IPv6 cidr block association set information. + returned: always + type: complex + contains: + association_id: + description: The association ID + returned: always + type: str + ipv6_cidr_block: + description: The IPv6 CIDR block that is associated with the subnet. + returned: always + type: str + ipv6_cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. + returned: always + type: str +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +@AWSRetry.exponential_backoff() +def describe_subnets_with_backoff(connection, subnet_ids, filters): + """ + Describe Subnets with AWSRetry backoff throttling support. 
+ + connection : boto3 client connection object + subnet_ids : list of subnet ids for which to gather information + filters : additional filters to apply to request + """ + return connection.describe_subnets(SubnetIds=subnet_ids, Filters=filters) + + +def describe_subnets(connection, module): + """ + Describe Subnets. + + module : AnsibleAWSModule object + connection : boto3 client connection object + """ + # collect parameters + filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + subnet_ids = module.params.get('subnet_ids') + + if subnet_ids is None: + # Set subnet_ids to empty list if it is None + subnet_ids = [] + + # init empty list for return vars + subnet_info = list() + + # Get the basic VPC info + try: + response = describe_subnets_with_backoff(connection, subnet_ids, filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to describe subnets') + + for subnet in response['Subnets']: + # for backwards compatibility + subnet['id'] = subnet['SubnetId'] + subnet_info.append(camel_dict_to_snake_dict(subnet)) + # convert tag list to ansible dict + subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', [])) + + module.exit_json(subnets=subnet_info) + + +def main(): + argument_spec = dict( + subnet_ids=dict(type='list', elements='str', default=[], aliases=['subnet_id']), + filters=dict(type='dict', default={}) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + if module._name == 'ec2_vpc_subnet_facts': + module.deprecate("The 'ec2_vpc_subnet_facts' module has been renamed to 'ec2_vpc_subnet_info'", date='2021-12-01', collection_name='amazon.aws') + + connection = module.client('ec2') + + describe_subnets(connection, module) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py new file mode 100644 index 00000000..316d532e --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_subnet_info +version_added: 1.0.0 +short_description: Gather information about ec2 VPC subnets in AWS +description: + - Gather information about ec2 VPC subnets in AWS + - This module was called C(ec2_vpc_subnet_facts) before Ansible 2.9. The usage did not change. +author: "Rob White (@wimnat)" +requirements: + - boto3 + - botocore +options: + subnet_ids: + description: + - A list of subnet IDs to gather information for. + aliases: ['subnet_id'] + type: list + elements: str + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters. + type: dict +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
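+
+# (Illustrative) Gather information about subnets in a single availability zone,
+# using the availability-zone filter from the DescribeSubnets API
+- amazon.aws.ec2_vpc_subnet_info:
+    filters:
+      availability-zone: us-east-1a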
+ +# Gather information about all VPC subnets +- amazon.aws.ec2_vpc_subnet_info: + +# Gather information about a particular VPC subnet using ID +- amazon.aws.ec2_vpc_subnet_info: + subnet_ids: subnet-00112233 + +# Gather information about any VPC subnet with a tag key Name and value Example +- amazon.aws.ec2_vpc_subnet_info: + filters: + "tag:Name": Example + +# Gather information about any VPC subnet within VPC with ID vpc-abcdef00 +- amazon.aws.ec2_vpc_subnet_info: + filters: + vpc-id: vpc-abcdef00 + +# Gather information about a set of VPC subnets, publicA, publicB and publicC within a +# VPC with ID vpc-abcdef00 and then use the jinja map function to return the +# subnet_ids as a list. + +- amazon.aws.ec2_vpc_subnet_info: + filters: + vpc-id: vpc-abcdef00 + "tag:Name": "{{ item }}" + loop: + - publicA + - publicB + - publicC + register: subnet_info + +- set_fact: + subnet_ids: "{{ subnet_info.subnets|map(attribute='id')|list }}" +''' + +RETURN = ''' +subnets: + description: Returns an array of complex objects as described below. + returned: success + type: complex + contains: + subnet_id: + description: The ID of the Subnet. + returned: always + type: str + id: + description: The ID of the Subnet (for backwards compatibility). + returned: always + type: str + vpc_id: + description: The ID of the VPC . + returned: always + type: str + state: + description: The state of the subnet. + returned: always + type: str + tags: + description: A dict of tags associated with the Subnet. + returned: always + type: dict + map_public_ip_on_launch: + description: True/False depending on attribute setting for public IP mapping. + returned: always + type: bool + default_for_az: + description: True if this is the default subnet for AZ. + returned: always + type: bool + cidr_block: + description: The IPv4 CIDR block assigned to the subnet. + returned: always + type: str + available_ip_address_count: + description: Count of available IPs in subnet. + returned: always + type: str + availability_zone: + description: The availability zone where the subnet exists. + returned: always + type: str + assign_ipv6_address_on_creation: + description: True/False depending on attribute setting for IPv6 address assignment. + returned: always + type: bool + ipv6_cidr_block_association_set: + description: An array of IPv6 cidr block association set information. + returned: always + type: complex + contains: + association_id: + description: The association ID + returned: always + type: str + ipv6_cidr_block: + description: The IPv6 CIDR block that is associated with the subnet. + returned: always + type: str + ipv6_cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. + returned: always + type: str +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ..module_utils.core import AnsibleAWSModule +from ..module_utils.ec2 import AWSRetry +from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +@AWSRetry.exponential_backoff() +def describe_subnets_with_backoff(connection, subnet_ids, filters): + """ + Describe Subnets with AWSRetry backoff throttling support. 
+ + connection : boto3 client connection object + subnet_ids : list of subnet ids for which to gather information + filters : additional filters to apply to request + """ + return connection.describe_subnets(SubnetIds=subnet_ids, Filters=filters) + + +def describe_subnets(connection, module): + """ + Describe Subnets. + + module : AnsibleAWSModule object + connection : boto3 client connection object + """ + # collect parameters + filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + subnet_ids = module.params.get('subnet_ids') + + if subnet_ids is None: + # Set subnet_ids to empty list if it is None + subnet_ids = [] + + # init empty list for return vars + subnet_info = list() + + # Get the basic VPC info + try: + response = describe_subnets_with_backoff(connection, subnet_ids, filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to describe subnets') + + for subnet in response['Subnets']: + # for backwards compatibility + subnet['id'] = subnet['SubnetId'] + subnet_info.append(camel_dict_to_snake_dict(subnet)) + # convert tag list to ansible dict + subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', [])) + + module.exit_json(subnets=subnet_info) + + +def main(): + argument_spec = dict( + subnet_ids=dict(type='list', elements='str', default=[], aliases=['subnet_id']), + filters=dict(type='dict', default={}) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + if module._name == 'ec2_vpc_subnet_facts': + module.deprecate("The 'ec2_vpc_subnet_facts' module has been renamed to 'ec2_vpc_subnet_info'", date='2021-12-01', collection_name='amazon.aws') + + connection = module.client('ec2') + + describe_subnets(connection, module) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py new file mode 100644 index 00000000..3c4f6422 --- /dev/null +++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py @@ -0,0 +1,876 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: s3_bucket +version_added: 1.0.0 +short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID +description: + - Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID. +requirements: [ boto3 ] +author: "Rob White (@wimnat)" +options: + force: + description: + - When trying to delete a bucket, delete all keys (including versions and delete markers) + in the bucket first (an S3 bucket must be empty for a successful deletion). 
+    type: bool
+    default: 'no'
+  name:
+    description:
+      - Name of the S3 bucket.
+    required: true
+    type: str
+  policy:
+    description:
+      - The JSON policy as a string.
+    type: json
+  s3_url:
+    description:
+      - S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and FakeS3 etc.
+      - Assumes AWS if not specified.
+      - For Walrus, use the FQDN of the endpoint without scheme or path.
+    aliases: [ S3_URL ]
+    type: str
+  ceph:
+    description:
+      - Enable API compatibility with Ceph. It takes into account the S3 API subset working
+        with Ceph in order to provide the same module behaviour where possible.
+    type: bool
+    default: false
+  requester_pays:
+    description:
+      - With Requester Pays buckets, the requester instead of the bucket owner pays the cost
+        of the request and the data download from the bucket.
+    type: bool
+  state:
+    description:
+      - Create or remove the S3 bucket.
+    required: false
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+  tags:
+    description:
+      - Tags dict to apply to the bucket.
+    type: dict
+  purge_tags:
+    description:
+      - Whether to remove tags that aren't present in the I(tags) parameter.
+    type: bool
+    default: True
+  versioning:
+    description:
+      - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended).
+    type: bool
+  encryption:
+    description:
+      - Describes the default server-side encryption to apply to new objects in the bucket.
+        In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly.
+    choices: [ 'none', 'AES256', 'aws:kms' ]
+    type: str
+  encryption_key_id:
+    description: KMS master key ID to use for the default encryption. This parameter is allowed if I(encryption) is C(aws:kms). If
+      not specified then it will default to the AWS provided KMS key.
+    type: str
+  public_access:
+    description:
+      - Configure public access block for S3 bucket.
+      - This option cannot be used together with I(delete_public_access).
+    suboptions:
+      block_public_acls:
+        description: Sets BlockPublicAcls value.
+        type: bool
+        default: False
+      block_public_policy:
+        description: Sets BlockPublicPolicy value.
+        type: bool
+        default: False
+      ignore_public_acls:
+        description: Sets IgnorePublicAcls value.
+        type: bool
+        default: False
+      restrict_public_buckets:
+        description: Sets RestrictPublicBuckets value.
+        type: bool
+        default: False
+    type: dict
+    version_added: 1.3.0
+  delete_public_access:
+    description:
+      - Delete public access block configuration from bucket.
+      - This option cannot be used together with a I(public_access) definition.
+    default: false
+    type: bool
+    version_added: 1.3.0
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+notes:
+  - If the C(requestPayment), C(policy), C(tagging) or C(versioning)
+    operations/API aren't implemented by the endpoint, the module doesn't fail
+    as long as I(requester_pays) is C(False) and I(policy), I(tags), and I(versioning) are C(None).
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
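+
+# (Illustrative) Add a tag to a bucket while keeping its existing tags,
+# by disabling the purge_tags behaviour documented above
+- amazon.aws.s3_bucket:
+    name: mys3bucket
+    state: present
+    tags:
+      example: tag1
+    purge_tags: no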
+
+# Create a simple S3 bucket
+- amazon.aws.s3_bucket:
+    name: mys3bucket
+    state: present
+
+# Create a simple S3 bucket on Ceph Rados Gateway
+- amazon.aws.s3_bucket:
+    name: mys3bucket
+    s3_url: http://your-ceph-rados-gateway-server.xxx
+    ceph: true
+
+# Remove an S3 bucket and any keys it contains
+- amazon.aws.s3_bucket:
+    name: mys3bucket
+    state: absent
+    force: yes
+
+# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
+- amazon.aws.s3_bucket:
+    name: mys3bucket
+    policy: "{{ lookup('file','policy.json') }}"
+    requester_pays: yes
+    versioning: yes
+    tags:
+      example: tag1
+      another: tag2
+
+# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
+- amazon.aws.s3_bucket:
+    name: mydobucket
+    s3_url: 'https://nyc3.digitaloceanspaces.com'
+
+# Create a bucket with AES256 encryption
+- amazon.aws.s3_bucket:
+    name: mys3bucket
+    state: present
+    encryption: "AES256"
+
+# Create a bucket with aws:kms encryption, KMS key
+- amazon.aws.s3_bucket:
+    name: mys3bucket
+    state: present
+    encryption: "aws:kms"
+    encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example"
+
+# Create a bucket with aws:kms encryption, default key
+- amazon.aws.s3_bucket:
+    name: mys3bucket
+    state: present
+    encryption: "aws:kms"
+
+# Create a bucket with public policy block configuration
+- amazon.aws.s3_bucket:
+    name: mys3bucket
+    state: present
+    public_access:
+      block_public_acls: true
+      ignore_public_acls: true
+      ## keys == 'false' can be omitted, undefined keys default to 'false'
+      # block_public_policy: false
+      # restrict_public_buckets: false
+
+# Delete public policy block from bucket
+- amazon.aws.s3_bucket:
+    name: mys3bucket
+    state: present
+    delete_public_access: true
+'''
+
+import json
+import os
+import time
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.basic import to_text
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import boto3_conn
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import compare_policies
+from ..module_utils.ec2 import get_aws_connection_info
+from ..module_utils.ec2 import snake_dict_to_camel_dict
+
+
+def create_or_update_bucket(s3_client, module, location):
+
+    policy = module.params.get("policy")
+    name = module.params.get("name")
+    requester_pays = module.params.get("requester_pays")
+    tags = module.params.get("tags")
+    purge_tags = module.params.get("purge_tags")
+    versioning = module.params.get("versioning")
+    encryption = module.params.get("encryption")
+    encryption_key_id = module.params.get("encryption_key_id")
+    public_access = module.params.get("public_access")
+    delete_public_access = module.params.get("delete_public_access")
+    changed = False
+    result = {}
+
+    try:
+        bucket_is_present = bucket_exists(s3_client, name)
+    except EndpointConnectionError as e:
+        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+    if not bucket_is_present:
+        try:
+            bucket_changed = create_bucket(s3_client, 
name, location) + s3_client.get_waiter('bucket_exists').wait(Bucket=name) + changed = changed or bucket_changed + except WaiterError as e: + module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available') + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed while creating bucket") + + # Versioning + try: + versioning_status = get_bucket_versioning(s3_client, name) + except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as exp: + if versioning is not None: + module.fail_json_aws(exp, msg="Failed to get bucket versioning") + except (BotoCoreError, ClientError) as exp: + module.fail_json_aws(exp, msg="Failed to get bucket versioning") + else: + if versioning is not None: + required_versioning = None + if versioning and versioning_status.get('Status') != "Enabled": + required_versioning = 'Enabled' + elif not versioning and versioning_status.get('Status') == "Enabled": + required_versioning = 'Suspended' + + if required_versioning: + try: + put_bucket_versioning(s3_client, name, required_versioning) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to update bucket versioning") + + versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning) + + # This output format is there to ensure compatibility with previous versions of the module + result['versioning'] = { + 'Versioning': versioning_status.get('Status', 'Disabled'), + 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'), + } + + # Requester pays + try: + requester_pays_status = get_bucket_request_payment(s3_client, name) + except is_boto3_error_code(['NotImplemented', 'XNotImplemented']): + if requester_pays is not None: + module.fail_json_aws(exp, msg="Failed to get bucket request payment") + except (BotoCoreError, ClientError) as exp: + module.fail_json_aws(exp, msg="Failed to get bucket request payment") + else: + if requester_pays is not None: + payer = 'Requester' if requester_pays else 'BucketOwner' + if requester_pays_status != payer: + put_bucket_request_payment(s3_client, name, payer) + requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False) + if requester_pays_status is None: + # We have seen that it happens quite a lot of times that the put request was not taken into + # account, so we retry one more time + put_bucket_request_payment(s3_client, name, payer) + requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True) + changed = True + + result['requester_pays'] = requester_pays + + # Policy + try: + current_policy = get_bucket_policy(s3_client, name) + except is_boto3_error_code(['NotImplemented', 'XNotImplemented']): + if policy is not None: + module.fail_json_aws(exp, msg="Failed to get bucket policy") + except (BotoCoreError, ClientError) as exp: + module.fail_json_aws(exp, msg="Failed to get bucket policy") + else: + if policy is not None: + if isinstance(policy, string_types): + policy = json.loads(policy) + + if not policy and current_policy: + try: + delete_bucket_policy(s3_client, name) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to delete bucket policy") + current_policy = wait_policy_is_applied(module, s3_client, name, policy) + changed = True + elif compare_policies(current_policy, policy): + try: + put_bucket_policy(s3_client, name, policy) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to update bucket policy") 
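+                # Bucket policies are applied eventually-consistently, so poll (via
+                # wait_policy_is_applied below) until the policy read back matches
+                # the policy that was put.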
+                current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
+                if current_policy is None:
+                    # As with request payment, the put request is quite often not
+                    # taken into account immediately, so we retry one more time
+                    put_bucket_policy(s3_client, name, policy)
+                    current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
+                changed = True
+
+    result['policy'] = current_policy
+
+    # Tags
+    try:
+        current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
+    except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as exp:
+        if tags is not None:
+            module.fail_json_aws(exp, msg="Failed to get bucket tags")
+    except (ClientError, BotoCoreError) as exp:
+        module.fail_json_aws(exp, msg="Failed to get bucket tags")
+    else:
+        if tags is not None:
+            # Tags are always returned as text
+            tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
+            if not purge_tags:
+                # Ensure existing tags that aren't updated by desired tags remain
+                current_copy = current_tags_dict.copy()
+                current_copy.update(tags)
+                tags = current_copy
+            if current_tags_dict != tags:
+                if tags:
+                    try:
+                        put_bucket_tagging(s3_client, name, tags)
+                    except (BotoCoreError, ClientError) as e:
+                        module.fail_json_aws(e, msg="Failed to update bucket tags")
+                else:
+                    if purge_tags:
+                        try:
+                            delete_bucket_tagging(s3_client, name)
+                        except (BotoCoreError, ClientError) as e:
+                            module.fail_json_aws(e, msg="Failed to delete bucket tags")
+                current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
+                changed = True
+
+    result['tags'] = current_tags_dict
+
+    # Encryption
+    try:
+        current_encryption = get_bucket_encryption(s3_client, name)
+    except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to get bucket encryption")
+
+    if encryption is not None:
+        current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
+        current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
+        if encryption == 'none' and current_encryption_algorithm is not None:
+            try:
+                delete_bucket_encryption(s3_client, name)
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Failed to delete bucket encryption")
+            current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
+            changed = True
+        elif encryption != 'none' and (encryption != current_encryption_algorithm or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id)):
+            expected_encryption = {'SSEAlgorithm': encryption}
+            if encryption == 'aws:kms' and encryption_key_id is not None:
+                expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
+            current_encryption = put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption)
+            changed = True
+
+    result['encryption'] = current_encryption
+
+    # Public access block configuration
+    current_public_access = {}
+
+    # -- Create / Update public access block
+    if public_access is not None:
+        try:
+            current_public_access = get_bucket_public_access(s3_client, name)
+        except (ClientError, BotoCoreError) as err_public_access:
+            module.fail_json_aws(err_public_access, msg="Failed to get bucket public access configuration")
+        camel_public_block = snake_dict_to_camel_dict(public_access, capitalize_first=True)
+
+        if current_public_access == camel_public_block:
+            result['public_access_block'] = current_public_access
+        else:
+            put_bucket_public_access(s3_client, name, camel_public_block)
+            changed = 
True + result['public_access_block'] = camel_public_block + + # -- Delete public access block + if delete_public_access: + try: + current_public_access = get_bucket_public_access(s3_client, name) + except (ClientError, BotoCoreError) as err_public_access: + module.fail_json_aws(err_public_access, msg="Failed to get bucket public access configuration") + + if current_public_access == {}: + result['public_access_block'] = current_public_access + else: + delete_bucket_public_access(s3_client, name) + changed = True + result['public_access_block'] = {} + + # Module exit + module.exit_json(changed=changed, name=name, **result) + + +def bucket_exists(s3_client, bucket_name): + # head_bucket appeared to be really inconsistent, so we use list_buckets instead, + # and loop over all the buckets, even if we know it's less performant :( + all_buckets = s3_client.list_buckets(Bucket=bucket_name)['Buckets'] + return any(bucket['Name'] == bucket_name for bucket in all_buckets) + + +@AWSRetry.exponential_backoff(max_delay=120) +def create_bucket(s3_client, bucket_name, location): + try: + configuration = {} + if location not in ('us-east-1', None): + configuration['LocationConstraint'] = location + if len(configuration) > 0: + s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration) + else: + s3_client.create_bucket(Bucket=bucket_name) + return True + except is_boto3_error_code('BucketAlreadyOwnedByYou'): + # We should never get here since we check the bucket presence before calling the create_or_update_bucket + # method. However, the AWS Api sometimes fails to report bucket presence, so we catch this exception + return False + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +def put_bucket_tagging(s3_client, bucket_name, tags): + s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)}) + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +def put_bucket_policy(s3_client, bucket_name, policy): + s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy)) + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +def delete_bucket_policy(s3_client, bucket_name): + s3_client.delete_bucket_policy(Bucket=bucket_name) + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +def get_bucket_policy(s3_client, bucket_name): + try: + current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy')) + except is_boto3_error_code('NoSuchBucketPolicy'): + return None + + return current_policy + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +def put_bucket_request_payment(s3_client, bucket_name, payer): + s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer}) + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +def get_bucket_request_payment(s3_client, bucket_name): + return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer') + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +def get_bucket_versioning(s3_client, bucket_name): + return s3_client.get_bucket_versioning(Bucket=bucket_name) + + 
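+# Note on the helpers above and below: each thin boto3 wrapper is decorated with
+# AWSRetry.exponential_backoff() so throttling and other transient errors are
+# retried automatically; catch_extra_error_codes extends the default set of
+# retryable errors with endpoint-specific cases such as 'NoSuchBucket' (a
+# freshly created bucket that has not propagated yet) and 'OperationAborted'
+# (a conflicting bucket operation in progress).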
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_versioning(s3_client, bucket_name, required_versioning):
+    s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_encryption(s3_client, bucket_name):
+    if not hasattr(s3_client, "get_bucket_encryption"):
+        return None
+
+    try:
+        result = s3_client.get_bucket_encryption(Bucket=bucket_name)
+        return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
+    except is_boto3_error_code('ServerSideEncryptionConfigurationNotFoundError'):
+        return None
+    except (IndexError, KeyError):
+        return None
+
+
+def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption):
+    max_retries = 3
+    for retries in range(1, max_retries + 1):
+        try:
+            put_bucket_encryption(s3_client, name, expected_encryption)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to set bucket encryption")
+        current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption,
+                                                        should_fail=(retries == max_retries), retries=5)
+        if current_encryption == expected_encryption:
+            return current_encryption
+
+    # We shouldn't get here: that would require current_encryption != expected_encryption
+    # on the final retry, in which case wait_encryption_is_applied() above should already
+    # have failed the module.
+    module.fail_json(msg='Failed to apply bucket encryption',
+                     current=current_encryption, expected=expected_encryption, retries=retries)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_encryption(s3_client, bucket_name, encryption):
+    server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
+    s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_tagging(s3_client, bucket_name):
+    s3_client.delete_bucket_tagging(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_encryption(s3_client, bucket_name):
+    s3_client.delete_bucket_encryption(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=['OperationAborted'])
+def delete_bucket(s3_client, bucket_name):
+    try:
+        s3_client.delete_bucket(Bucket=bucket_name)
+    except is_boto3_error_code('NoSuchBucket'):
+        # The bucket was most likely already in a deleting state when we checked its
+        # existence, so we just ignore the error.
+        pass
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_public_access(s3_client, bucket_name, public_access):
+    '''
+    Put a new public access block on an S3 bucket
+    '''
+    s3_client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=public_access)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_public_access(s3_client, bucket_name):
+    '''
+    Delete the public access block from an S3 bucket
+    '''
+    s3_client.delete_public_access_block(Bucket=bucket_name)
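+
+
+# Note: the PublicAccessBlockConfiguration passed to put_public_access_block() above is
+# the camel-cased dict built from the module's public_access suboptions in
+# create_or_update_bucket(); the values shown here are for illustration only:
+#
+#     {
+#         'BlockPublicAcls': True,
+#         'IgnorePublicAcls': True,
+#         'BlockPublicPolicy': True,
+#         'RestrictPublicBuckets': True,
+#     }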
+
+
+def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
+    for dummy in range(0, 12):
+        try:
+            current_policy = get_bucket_policy(s3_client, bucket_name)
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket policy")
+
+        if compare_policies(current_policy, expected_policy):
+            time.sleep(5)
+        else:
+            return current_policy
+    if should_fail:
+        module.fail_json(msg="Bucket policy failed to apply in the expected time",
+                         requested_policy=expected_policy, live_policy=current_policy)
+    else:
+        return None
+
+
+def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
+    for dummy in range(0, 12):
+        try:
+            requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket request payment")
+        if requester_pays_status != expected_payer:
+            time.sleep(5)
+        else:
+            return requester_pays_status
+    if should_fail:
+        module.fail_json(msg="Bucket request payment failed to apply in the expected time",
+                         requested_status=expected_payer, live_status=requester_pays_status)
+    else:
+        return None
+
+
+def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
+    for dummy in range(0, retries):
+        try:
+            encryption = get_bucket_encryption(s3_client, bucket_name)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
+        if encryption != expected_encryption:
+            time.sleep(5)
+        else:
+            return encryption
+
+    if should_fail:
+        module.fail_json(msg="Bucket encryption failed to apply in the expected time",
+                         requested_encryption=expected_encryption, live_encryption=encryption)
+
+    return encryption
+
+
+def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
+    for dummy in range(0, 24):
+        try:
+            versioning_status = get_bucket_versioning(s3_client, bucket_name)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
+        if versioning_status.get('Status') != required_versioning:
+            time.sleep(8)
+        else:
+            return versioning_status
+    module.fail_json(msg="Bucket versioning failed to apply in the expected time",
+                     requested_versioning=required_versioning, live_versioning=versioning_status)
+
+
+def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
+    for dummy in range(0, 12):
+        try:
+            current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket tags")
+        if current_tags_dict != expected_tags_dict:
+            time.sleep(5)
+        else:
+            return current_tags_dict
+    module.fail_json(msg="Bucket tags failed to apply in the expected time",
+                     requested_tags=expected_tags_dict, live_tags=current_tags_dict)
+
+
+def get_current_bucket_tags_dict(s3_client, bucket_name):
+    try:
+        current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
+    except is_boto3_error_code('NoSuchTagSet'):
+        return {}
+    # The Ceph S3 API returns a different error code from AWS
+    except is_boto3_error_code('NoSuchTagSetError'):  # pylint: disable=duplicate-except
+        return {}
+
+    return boto3_tag_list_to_ansible_dict(current_tags)
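+
+
+# Note: boto3_tag_list_to_ansible_dict() flattens the boto3 TagSet into the dict the
+# wait/compare logic above works with; for example (sample values only):
+#
+#     [{'Key': 'env', 'Value': 'prod'}, {'Key': 'team', 'Value': 'data'}]
+#
+# becomes
+#
+#     {'env': 'prod', 'team': 'data'}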
+
+
+def get_bucket_public_access(s3_client, bucket_name):
+    '''
+    Get the current public access block of an S3 bucket
+    '''
+    try:
+        bucket_public_access_block = s3_client.get_public_access_block(Bucket=bucket_name)
+        return bucket_public_access_block['PublicAccessBlockConfiguration']
+    except is_boto3_error_code('NoSuchPublicAccessBlockConfiguration'):
+        return {}
+
+
+def paginated_list(s3_client, **pagination_params):
+    pg = s3_client.get_paginator('list_objects_v2')
+    for page in pg.paginate(**pagination_params):
+        yield [data['Key'] for data in page.get('Contents', [])]
+
+
+def paginated_versions_list(s3_client, **pagination_params):
+    try:
+        pg = s3_client.get_paginator('list_object_versions')
+        for page in pg.paginate(**pagination_params):
+            # We have to merge the Versions and DeleteMarkers lists here, as DeleteMarkers can still prevent a bucket deletion
+            yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
+    except is_boto3_error_code('NoSuchBucket'):
+        yield []
+
+
+def destroy_bucket(s3_client, module):
+
+    force = module.params.get("force")
+    name = module.params.get("name")
+    try:
+        bucket_is_present = bucket_exists(s3_client, name)
+    except EndpointConnectionError as e:
+        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+    if not bucket_is_present:
+        module.exit_json(changed=False)
+
+    if force:
+        # If there are contents, we need to delete them (including versions) before we can delete the bucket
+        try:
+            for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
+                formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
+                for fk in formatted_keys:
+                    # Remove the VersionId where it is `None` so that unversioned
+                    # objects are deleted using `DeleteObject` rather than
+                    # `DeleteObjectVersion`, improving backwards compatibility with
+                    # older IAM policies.
+                    if not fk.get('VersionId'):
+                        fk.pop('VersionId')
+
+                if formatted_keys:
+                    resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
+                    if resp.get('Errors'):
+                        module.fail_json(
+                            msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
+                                ', '.join([k['Key'] for k in resp['Errors']])
+                            ),
+                            errors=resp['Errors'], response=resp
+                        )
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed while deleting bucket")
+
+    try:
+        delete_bucket(s3_client, name)
+        s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
+    except WaiterError as e:
+        module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to delete bucket")
+
+    module.exit_json(changed=True)
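+
+
+# Note: with force=true on a versioned bucket, destroy_bucket() above ends up sending
+# DeleteObjects payloads shaped like the following (keys and version IDs are sample
+# values for illustration only):
+#
+#     {'Objects': [
+#         {'Key': 'logs/app.log', 'VersionId': '3HL4kqtJlcpXroDTDmJ'},
+#         {'Key': 'tmp/scratch.bin'},  # unversioned: VersionId stripped -> plain DeleteObject
+#     ]}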
+
+
+def is_fakes3(s3_url):
+    """ Return True if s3_url has scheme fakes3:// """
+    if s3_url is not None:
+        return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+    else:
+        return False
+
+
+def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
+    if s3_url and ceph:  # TODO - test this
+        ceph = urlparse(s3_url)
+        params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
+    elif is_fakes3(s3_url):
+        fakes3 = urlparse(s3_url)
+        port = fakes3.port
+        if fakes3.scheme == 'fakes3s':
+            protocol = "https"
+            if port is None:
+                port = 443
+        else:
+            protocol = "http"
+            if port is None:
+                port = 80
+        params = dict(module=module, conn_type='client', resource='s3', region=location,
+                      endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+                      use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+    else:
+        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
+    return boto3_conn(**params)
+
+
+def main():
+
+    argument_spec = dict(
+        force=dict(default=False, type='bool'),
+        policy=dict(type='json'),
+        name=dict(required=True),
+        requester_pays=dict(type='bool'),
+        s3_url=dict(aliases=['S3_URL']),
+        state=dict(default='present', choices=['present', 'absent']),
+        tags=dict(type='dict'),
+        purge_tags=dict(type='bool', default=True),
+        versioning=dict(type='bool'),
+        ceph=dict(default=False, type='bool'),
+        encryption=dict(choices=['none', 'AES256', 'aws:kms']),
+        encryption_key_id=dict(),
+        public_access=dict(type='dict', options=dict(
+            block_public_acls=dict(type='bool', default=False),
+            ignore_public_acls=dict(type='bool', default=False),
+            block_public_policy=dict(type='bool', default=False),
+            restrict_public_buckets=dict(type='bool', default=False))),
+        delete_public_access=dict(type='bool', default=False)
+    )
+
+    required_by = dict(
+        encryption_key_id=('encryption',),
+    )
+
+    mutually_exclusive = [
+        ['public_access', 'delete_public_access']
+    ]
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec, required_by=required_by, mutually_exclusive=mutually_exclusive
+    )
+
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+
+    if region in ('us-east-1', '', None):
+        # default to US Standard region
+        location = 'us-east-1'
+    else:
+        # Boto uses symbolic names for locations but region strings will
+        # actually work fine for everything except us-east-1 (US Standard)
+        location = region
+
+    s3_url = module.params.get('s3_url')
+    ceph = module.params.get('ceph')
+
+    # Allow eucarc environment variables to be used if ansible vars aren't set
+    if not s3_url and 'S3_URL' in os.environ:
+        s3_url = os.environ['S3_URL']
+
+    if ceph and not s3_url:
+        module.fail_json(msg='ceph flavour requires s3_url')
+
+    # Look at s3_url and tweak connection settings
+    # if connecting to Ceph RGW, Walrus or fakes3
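+    # For example (illustrative values only): s3_url='fakes3s://localhost:4567' would
+    # yield the endpoint 'https://localhost:4567' in get_s3_client(), while with
+    # ceph=true the given s3_url is passed through to boto3_conn() as the endpoint
+    # unchanged.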
+    if s3_url:
+        for key in ['validate_certs', 'security_token', 'profile_name']:
+            aws_connect_kwargs.pop(key, None)
+    s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)
+
+    if s3_client is None:  # this should never happen
+        module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')
+
+    state = module.params.get("state")
+    encryption = module.params.get("encryption")
+    encryption_key_id = module.params.get("encryption_key_id")
+
+    if not hasattr(s3_client, "get_bucket_encryption"):
+        if encryption is not None:
+            module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")
+
+    # Parameter validation
+    if encryption_key_id is not None and encryption != 'aws:kms':
+        module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")
+
+    if state == 'present':
+        create_or_update_bucket(s3_client, module, location)
+    elif state == 'absent':
+        destroy_bucket(s3_client, module)
+
+
+if __name__ == '__main__':
+    main()
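+
+
+# Note: the required_by and mutually_exclusive wiring in main() means AnsibleAWSModule
+# rejects invalid parameter combinations before any AWS call is made; roughly (sketch
+# only, not the real AnsibleAWSModule implementation):
+#
+#     if params.get('encryption_key_id') is not None and params.get('encryption') is None:
+#         fail("missing parameter(s) required by 'encryption_key_id': encryption")
+#     if params.get('public_access') is not None and params.get('delete_public_access'):
+#         fail("parameters are mutually exclusive: public_access|delete_public_access")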