Diffstat (limited to 'test/support/integration/plugins/modules')
60 files changed, 27524 insertions, 0 deletions
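The six _azure_rm_*_facts.py entries that open this diff are not ordinary files: file mode 120000 marks each one as a symlink, and the single added line in each hunk is the link target, so the deprecated _facts module names keep resolving to their renamed _info implementations. A minimal sketch of what checking out the first of these hunks produces on disk (illustrative only; the path and target are taken from that hunk):

import os

# The blob content of a mode-120000 entry is the link target, so checkout
# materialises it as a relative symlink next to the renamed module.
os.symlink('azure_rm_mariadbconfiguration_info.py',   # target: the hunk's one added line
           '_azure_rm_mariadbconfiguration_facts.py')  # deprecated alias

# Reading the link back yields exactly the target string recorded in the hunk.
assert os.readlink('_azure_rm_mariadbconfiguration_facts.py') == 'azure_rm_mariadbconfiguration_info.py'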
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py new file mode 120000 index 00000000..f9993bfb --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py @@ -0,0 +1 @@ +azure_rm_mariadbconfiguration_info.py
\ No newline at end of file diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py new file mode 120000 index 00000000..b8293e64 --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py @@ -0,0 +1 @@ +azure_rm_mariadbdatabase_info.py
\ No newline at end of file diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py new file mode 120000 index 00000000..4311a0c1 --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py @@ -0,0 +1 @@ +azure_rm_mariadbfirewallrule_info.py
\ No newline at end of file diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py new file mode 120000 index 00000000..5f76e0e9 --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py @@ -0,0 +1 @@ +azure_rm_mariadbserver_info.py
\ No newline at end of file diff --git a/test/support/integration/plugins/modules/_azure_rm_resource_facts.py b/test/support/integration/plugins/modules/_azure_rm_resource_facts.py new file mode 120000 index 00000000..710fda10 --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_resource_facts.py @@ -0,0 +1 @@ +azure_rm_resource_info.py
\ No newline at end of file diff --git a/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py b/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py new file mode 120000 index 00000000..ead87c85 --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py @@ -0,0 +1 @@ +azure_rm_webapp_info.py
\ No newline at end of file diff --git a/test/support/integration/plugins/modules/aws_az_info.py b/test/support/integration/plugins/modules/aws_az_info.py new file mode 100644 index 00000000..c1efed6f --- /dev/null +++ b/test/support/integration/plugins/modules/aws_az_info.py @@ -0,0 +1,111 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'supported_by': 'community', + 'status': ['preview'] +} + +DOCUMENTATION = ''' +module: aws_az_info +short_description: Gather information about availability zones in AWS. +description: + - Gather information about availability zones in AWS. + - This module was called C(aws_az_facts) before Ansible 2.9. The usage did not change. +version_added: '2.5' +author: 'Henrique Rodrigues (@Sodki)' +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for + possible filters. Filter names and values are case sensitive. You can also use underscores + instead of dashes (-) in the filter keys, which will take precedence in case of conflict. + required: false + default: {} + type: dict +extends_documentation_fragment: + - aws + - ec2 +requirements: [botocore, boto3] +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all availability zones +- aws_az_info: + +# Gather information about a single availability zone +- aws_az_info: + filters: + zone-name: eu-west-1a +''' + +RETURN = ''' +availability_zones: + returned: on success + description: > + Availability zones that match the provided filters. Each element consists of a dict with all the information + related to that availability zone.
+ type: list + sample: "[ + { + 'messages': [], + 'region_name': 'us-west-1', + 'state': 'available', + 'zone_name': 'us-west-1b' + }, + { + 'messages': [], + 'region_name': 'us-west-1', + 'state': 'available', + 'zone_name': 'us-west-1c' + } + ]" +''' + +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + + +def main(): + argument_spec = dict( + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec) + if module._name == 'aws_az_facts': + module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'", + version='2.14', collection_name='ansible.builtin') + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + # Replace filter key underscores with dashes, for compatibility + sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items()) + + try: + availability_zones = connection.describe_availability_zones( + Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to describe availability zones.") + + # Turn the boto3 result into ansible_friendly_snaked_names + snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']] + + module.exit_json(availability_zones=snaked_availability_zones) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/aws_s3.py b/test/support/integration/plugins/modules/aws_s3.py new file mode 100644 index 00000000..54874f05 --- /dev/null +++ b/test/support/integration/plugins/modules/aws_s3.py @@ -0,0 +1,925 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: aws_s3 +short_description: manage objects in S3. +description: + - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and + deleting both objects and buckets, retrieving objects as files or strings and generating download links. + This module has a dependency on boto3 and botocore. +notes: + - In 2.4, this module has been renamed from C(s3) into M(aws_s3). +version_added: "1.1" +options: + bucket: + description: + - Bucket name. + required: true + type: str + dest: + description: + - The destination file path when downloading an object/key with a GET operation. + version_added: "1.3" + type: path + encrypt: + description: + - When set for PUT mode, asks for server-side encryption. + default: true + version_added: "2.0" + type: bool + encryption_mode: + description: + - What encryption mode to use if I(encrypt=true). + default: AES256 + choices: + - AES256 + - aws:kms + version_added: "2.7" + type: str + expiry: + description: + - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation. 
+ default: 600 + aliases: ['expiration'] + type: int + headers: + description: + - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. + version_added: "2.0" + type: dict + marker: + description: + - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order. + version_added: "2.0" + type: str + max_keys: + description: + - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys. + default: 1000 + version_added: "2.0" + type: int + metadata: + description: + - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. + version_added: "1.6" + type: dict + mode: + description: + - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), + getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket), + and delobj (delete object, Ansible 2.0+). + required: true + choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'] + type: str + object: + description: + - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. + type: str + permission: + description: + - This option lets the user set the canned permissions on the object/bucket that are created. + The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or + C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read), + C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list. + default: ['private'] + version_added: "2.0" + type: list + elements: str + prefix: + description: + - Limits the response to keys that begin with the specified prefix for list mode. + default: "" + version_added: "2.0" + type: str + version: + description: + - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket. + version_added: "2.0" + type: str + overwrite: + description: + - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. + Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0. + When this is set to 'different', the md5 sum of the local file is compared with the 'ETag' of the object/key in S3. + The ETag may or may not be an MD5 digest of the object data. See the ETag response header here + U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html) + default: 'always' + aliases: ['force'] + version_added: "1.2" + type: str + retries: + description: + - On recoverable failure, how many times to retry before actually failing. + default: 0 + version_added: "2.0" + type: int + aliases: ['retry'] + s3_url: + description: + - S3 URL endpoint for usage with Ceph, Eucalyptus and fakes3 etc. Otherwise assumes AWS. + aliases: [ S3_URL ] + type: str + dualstack: + description: + - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6. + - Requires at least botocore version 1.4.45. + type: bool + default: false + version_added: "2.7" + rgw: + description: + - Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url). 
+ default: false + version_added: "2.2" + type: bool + src: + description: + - The source file path when performing a PUT operation. + version_added: "1.3" + type: str + ignore_nonexistent_bucket: + description: + - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the + GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying + I(ignore_nonexistent_bucket=true)." + version_added: "2.3" + type: bool + encryption_kms_key_id: + description: + - KMS key id to use when encrypting objects using I(encrypting=aws:kms). Ignored if I(encryption) is not C(aws:kms) + version_added: "2.7" + type: str +requirements: [ "boto3", "botocore" ] +author: + - "Lester Wade (@lwade)" + - "Sloane Hertel (@s-hertel)" +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +- name: Simple PUT operation + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + +- name: Simple PUT operation in Ceph RGW S3 + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + rgw: true + s3_url: "http://localhost:8000" + +- name: Simple GET operation + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + dest: /usr/local/myfile.txt + mode: get + +- name: Get a specific version of an object. + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + version: 48c9ee5131af7a716edc22df9772aa6f + dest: /usr/local/myfile.txt + mode: get + +- name: PUT/upload with metadata + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + metadata: 'Content-Encoding=gzip,Cache-Control=no-cache' + +- name: PUT/upload with custom headers + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + headers: 'x-amz-grant-full-control=emailAddress=owner@example.com' + +- name: List keys simple + aws_s3: + bucket: mybucket + mode: list + +- name: List keys all options + aws_s3: + bucket: mybucket + mode: list + prefix: /my/desired/ + marker: /my/desired/0023.txt + max_keys: 472 + +- name: Create an empty bucket + aws_s3: + bucket: mybucket + mode: create + permission: public-read + +- name: Create a bucket with key as directory, in the EU region + aws_s3: + bucket: mybucket + object: /my/directory/path + mode: create + region: eu-west-1 + +- name: Delete a bucket and all contents + aws_s3: + bucket: mybucket + mode: delete + +- name: GET an object but don't download if the file checksums match. New in 2.0 + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + dest: /usr/local/myfile.txt + mode: get + overwrite: different + +- name: Delete an object from a bucket + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + mode: delobj +''' + +RETURN = ''' +msg: + description: Message indicating the status of the operation. + returned: always + type: str + sample: PUT operation complete +url: + description: URL of the object. + returned: (for put and geturl operations) + type: str + sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature> +expiry: + description: Number of seconds the presigned url is valid for. + returned: (for geturl operation) + type: int + sample: 600 +contents: + description: Contents of the object as string. + returned: (for getstr operation) + type: str + sample: "Hello, world!" +s3_keys: + description: List of object keys. 
+ returned: (for list operation) + type: list + elements: str + sample: + - prefix1/ + - prefix1/key1 + - prefix1/key2 +''' + +import mimetypes +import os +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ssl import SSLError +from ansible.module_utils.basic import to_text, to_native +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.aws.s3 import calculate_etag, HAS_MD5 +from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn + +try: + import botocore +except ImportError: + pass # will be detected by imported AnsibleAWSModule + +IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented'] + + +class Sigv4Required(Exception): + pass + + +def key_check(module, s3, bucket, obj, version=None, validate=True): + exists = True + try: + if version: + s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + else: + s3.head_object(Bucket=bucket, Key=obj) + except botocore.exceptions.ClientError as e: + # if a client error is thrown, check if it's a 404 error + # if it's a 404 error, then the object does not exist + error_code = int(e.response['Error']['Code']) + if error_code == 404: + exists = False + elif error_code == 403 and validate is False: + pass + else: + module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj) + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj) + return exists + + +def etag_compare(module, local_file, s3, bucket, obj, version=None): + s3_etag = get_etag(s3, bucket, obj, version=version) + local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version) + + return s3_etag == local_etag + + +def get_etag(s3, bucket, obj, version=None): + if version: + key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + else: + key_check = s3.head_object(Bucket=bucket, Key=obj) + if not key_check: + return None + return key_check['ETag'] + + +def bucket_check(module, s3, bucket, validate=True): + exists = True + try: + s3.head_bucket(Bucket=bucket) + except botocore.exceptions.ClientError as e: + # If a client error is thrown, then check that it was a 404 error. + # If it was a 404 error, then the bucket does not exist. + error_code = int(e.response['Error']['Code']) + if error_code == 404: + exists = False + elif error_code == 403 and validate is False: + pass + else: + module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket) + except botocore.exceptions.EndpointConnectionError as e: + module.fail_json_aws(e, msg="Invalid endpoint provided") + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." 
% bucket) + return exists + + +def create_bucket(module, s3, bucket, location=None): + if module.check_mode: + module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True) + configuration = {} + if location not in ('us-east-1', None): + configuration['LocationConstraint'] = location + try: + if len(configuration) > 0: + s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration) + else: + s3.create_bucket(Bucket=bucket) + if module.params.get('permission'): + # Wait for the bucket to exist before setting ACLs + s3.get_waiter('bucket_exists').wait(Bucket=bucket) + for acl in module.params.get('permission'): + s3.put_bucket_acl(ACL=acl, Bucket=bucket) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS: + module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning") + else: + module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).") + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).") + + if bucket: + return True + + +def paginated_list(s3, **pagination_params): + pg = s3.get_paginator('list_objects_v2') + for page in pg.paginate(**pagination_params): + yield [data['Key'] for data in page.get('Contents', [])] + + +def paginated_versioned_list_with_fallback(s3, **pagination_params): + try: + versioned_pg = s3.get_paginator('list_object_versions') + for page in versioned_pg.paginate(**pagination_params): + delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])] + current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])] + yield delete_markers + current_objects + except botocore.exceptions.ClientError as e: + if to_text(e.response['Error']['Code']) in IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']: + for page in paginated_list(s3, **pagination_params): + yield [{'Key': data['Key']} for data in page] + + +def list_keys(module, s3, bucket, prefix, marker, max_keys): + pagination_params = {'Bucket': bucket} + for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)): + pagination_params[param_name] = param_value + try: + keys = sum(paginated_list(s3, **pagination_params), []) + module.exit_json(msg="LIST operation complete", s3_keys=keys) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket)) + + +def delete_bucket(module, s3, bucket): + if module.check_mode: + module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) + try: + exists = bucket_check(module, s3, bucket) + if exists is False: + return False + # if there are contents then we need to delete them before we can delete the bucket + for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket): + if keys: + s3.delete_objects(Bucket=bucket, Delete={'Objects': keys}) + s3.delete_bucket(Bucket=bucket) + return True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while deleting bucket %s." 
% bucket) + + +def delete_key(module, s3, bucket, obj): + if module.check_mode: + module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) + try: + s3.delete_object(Bucket=bucket, Key=obj) + module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj) + + +def create_dirkey(module, s3, bucket, obj, encrypt): + if module.check_mode: + module.exit_json(msg="PUT operation skipped - running in check mode", changed=True) + try: + params = {'Bucket': bucket, 'Key': obj, 'Body': b''} + if encrypt: + params['ServerSideEncryption'] = module.params['encryption_mode'] + if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': + params['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] + + s3.put_object(**params) + for acl in module.params.get('permission'): + s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS: + module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning") + else: + module.fail_json_aws(e, msg="Failed while creating object %s." % obj) + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Failed while creating object %s." % obj) + module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True) + + +def path_check(path): + if os.path.exists(path): + return True + else: + return False + + +def option_in_extra_args(option): + temp_option = option.replace('-', '').lower() + + allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition', + 'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage', + 'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl', + 'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP', + 'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption', + 'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey', + 'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'} + + if temp_option in allowed_extra_args: + return allowed_extra_args[temp_option] + + +def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers): + if module.check_mode: + module.exit_json(msg="PUT operation skipped - running in check mode", changed=True) + try: + extra = {} + if encrypt: + extra['ServerSideEncryption'] = module.params['encryption_mode'] + if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': + extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] + if metadata: + extra['Metadata'] = {} + + # determine object metadata and extra arguments + for option in metadata: + extra_args_option = option_in_extra_args(option) + if extra_args_option is not None: + extra[extra_args_option] = metadata[option] + else: + extra['Metadata'][option] = metadata[option] + + if 'ContentType' not in extra: + content_type = mimetypes.guess_type(src)[0] + if content_type is None: + # s3 default content type + content_type = 'binary/octet-stream' + 
extra['ContentType'] = content_type + + s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to complete PUT operation.") + try: + for acl in module.params.get('permission'): + s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS: + module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning") + else: + module.fail_json_aws(e, msg="Unable to set object ACL") + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Unable to set object ACL") + try: + url = s3.generate_presigned_url(ClientMethod='put_object', + Params={'Bucket': bucket, 'Key': obj}, + ExpiresIn=expiry) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to generate presigned URL") + module.exit_json(msg="PUT operation complete", url=url, changed=True) + + +def download_s3file(module, s3, bucket, obj, dest, retries, version=None): + if module.check_mode: + module.exit_json(msg="GET operation skipped - running in check mode", changed=True) + # retries is the number of loops; range/xrange needs to be one + # more to get that count of loops. + try: + if version: + key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version) + else: + key = s3.get_object(Bucket=bucket, Key=obj) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e): + raise Sigv4Required() + elif e.response['Error']['Code'] not in ("403", "404"): + # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but + # user does not have the s3:GetObject permission. 404 errors are handled by download_file(). + module.fail_json_aws(e, msg="Could not find the key %s." % obj) + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Could not find the key %s." % obj) + + optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {} + for x in range(0, retries + 1): + try: + s3.download_file(bucket, obj, dest, **optional_kwargs) + module.exit_json(msg="GET operation complete", changed=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + # actually fail on last pass through the loop. + if x >= retries: + module.fail_json_aws(e, msg="Failed while downloading %s." % obj) + # otherwise, try again, this may be a transient timeout. + except SSLError as e: # will ClientError catch SSLError? + # actually fail on last pass through the loop. + if x >= retries: + module.fail_json_aws(e, msg="s3 download failed") + # otherwise, try again, this may be a transient timeout. 
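The download_s3file function above first validates the key with get_object, then loops over range(retries + 1): a retries value of N therefore allows N extra passes, and ClientError or SSLError is swallowed as a presumed transient failure on every pass except the last, which fails the module. A standalone sketch of that retry shape, assuming a plain boto3 client and hypothetical bucket, key and destination names:

import boto3
from ssl import SSLError
from botocore.exceptions import BotoCoreError, ClientError

s3 = boto3.client('s3')
retries = 3  # mirrors the module's retries parameter; 0 means a single attempt

for attempt in range(retries + 1):
    try:
        s3.download_file('mybucket', 'my/key.txt', '/tmp/key.txt')
        break  # success; the module calls module.exit_json here instead
    except (BotoCoreError, ClientError, SSLError):
        if attempt >= retries:
            raise  # final pass: the module calls module.fail_json_aws here
        # earlier passes fall through and retry the presumed transient failure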
+ + +def download_s3str(module, s3, bucket, obj, version=None, validate=True): + if module.check_mode: + module.exit_json(msg="GET operation skipped - running in check mode", changed=True) + try: + if version: + contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read()) + else: + contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read()) + module.exit_json(msg="GET operation complete", contents=contents, changed=True) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e): + raise Sigv4Required() + else: + module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj) + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj) + + +def get_download_url(module, s3, bucket, obj, expiry, changed=True): + try: + url = s3.generate_presigned_url(ClientMethod='get_object', + Params={'Bucket': bucket, 'Key': obj}, + ExpiresIn=expiry) + module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while getting download url.") + + +def is_fakes3(s3_url): + """ Return True if s3_url has scheme fakes3:// """ + if s3_url is not None: + return urlparse(s3_url).scheme in ('fakes3', 'fakes3s') + else: + return False + + +def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False): + if s3_url and rgw: # TODO - test this + rgw = urlparse(s3_url) + params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs) + elif is_fakes3(s3_url): + fakes3 = urlparse(s3_url) + port = fakes3.port + if fakes3.scheme == 'fakes3s': + protocol = "https" + if port is None: + port = 443 + else: + protocol = "http" + if port is None: + port = 80 + params = dict(module=module, conn_type='client', resource='s3', region=location, + endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), + use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) + else: + params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs) + if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms': + params['config'] = botocore.client.Config(signature_version='s3v4') + elif module.params['mode'] in ('get', 'getstr') and sig_4: + params['config'] = botocore.client.Config(signature_version='s3v4') + if module.params['dualstack']: + dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True}) + if 'config' in params: + params['config'] = params['config'].merge(dualconf) + else: + params['config'] = dualconf + return boto3_conn(**params) + + +def main(): + argument_spec = dict( + bucket=dict(required=True), + dest=dict(default=None, type='path'), + encrypt=dict(default=True, type='bool'), + encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'), + expiry=dict(default=600, type='int', aliases=['expiration']), + headers=dict(type='dict'), + marker=dict(default=""), + max_keys=dict(default=1000, type='int'), + metadata=dict(type='dict'), + mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True), + object=dict(), + permission=dict(type='list', default=['private']), + 
version=dict(default=None), + overwrite=dict(aliases=['force'], default='always'), + prefix=dict(default=""), + retries=dict(aliases=['retry'], type='int', default=0), + s3_url=dict(aliases=['S3_URL']), + dualstack=dict(default='no', type='bool'), + rgw=dict(default='no', type='bool'), + src=dict(), + ignore_nonexistent_bucket=dict(default=False, type='bool'), + encryption_kms_key_id=dict() + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[['mode', 'put', ['src', 'object']], + ['mode', 'get', ['dest', 'object']], + ['mode', 'getstr', ['object']], + ['mode', 'geturl', ['object']]], + ) + + bucket = module.params.get('bucket') + encrypt = module.params.get('encrypt') + expiry = module.params.get('expiry') + dest = module.params.get('dest', '') + headers = module.params.get('headers') + marker = module.params.get('marker') + max_keys = module.params.get('max_keys') + metadata = module.params.get('metadata') + mode = module.params.get('mode') + obj = module.params.get('object') + version = module.params.get('version') + overwrite = module.params.get('overwrite') + prefix = module.params.get('prefix') + retries = module.params.get('retries') + s3_url = module.params.get('s3_url') + dualstack = module.params.get('dualstack') + rgw = module.params.get('rgw') + src = module.params.get('src') + ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket') + + object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"] + bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"] + + if overwrite not in ['always', 'never', 'different']: + if module.boolean(overwrite): + overwrite = 'always' + else: + overwrite = 'never' + + if overwrite == 'different' and not HAS_MD5: + module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + + if region in ('us-east-1', '', None): + # default to US Standard region + location = 'us-east-1' + else: + # Boto uses symbolic names for locations but region strings will + # actually work fine for everything except us-east-1 (US Standard) + location = region + + if module.params.get('object'): + obj = module.params['object'] + # If there is a top level object, do nothing - if the object starts with / + # remove the leading character to maintain compatibility with Ansible versions < 2.4 + if obj.startswith('/'): + obj = obj[1:] + + # Bucket deletion does not require obj. Prevents ambiguity with delobj. 
+ if obj and mode == "delete": + module.fail_json(msg='Parameter obj cannot be used with mode=delete') + + # allow eucarc environment variables to be used if ansible vars aren't set + if not s3_url and 'S3_URL' in os.environ: + s3_url = os.environ['S3_URL'] + + if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url: + module.fail_json(msg='dualstack only applies to AWS S3') + + if dualstack and not module.botocore_at_least('1.4.45'): + module.fail_json(msg='dualstack requires botocore >= 1.4.45') + + # rgw requires an explicit url + if rgw and not s3_url: + module.fail_json(msg='rgw flavour requires s3_url') + + # Look at s3_url and tweak connection settings + # if connecting to RGW, Walrus or fakes3 + if s3_url: + for key in ['validate_certs', 'security_token', 'profile_name']: + aws_connect_kwargs.pop(key, None) + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url) + + validate = not ignore_nonexistent_bucket + + # separate types of ACLs + bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl] + object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl] + error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl] + if error_acl: + module.fail_json(msg='Unknown permission specified: %s' % error_acl) + + # First, we check to see if the bucket exists, we get "bucket" returned. + bucketrtn = bucket_check(module, s3, bucket, validate=validate) + + if validate and mode not in ('create', 'put', 'delete') and not bucketrtn: + module.fail_json(msg="Source bucket cannot be found.") + + if mode == 'get': + keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) + if keyrtn is False: + if version: + module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version)) + else: + module.fail_json(msg="Key %s does not exist." % obj) + + if path_check(dest) and overwrite != 'always': + if overwrite == 'never': + module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False) + if etag_compare(module, dest, s3, bucket, obj, version=version): + module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False) + + try: + download_s3file(module, s3, bucket, obj, dest, retries, version=version) + except Sigv4Required: + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True) + download_s3file(module, s3, bucket, obj, dest, retries, version=version) + + if mode == 'put': + + # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified + # these were separated into the variables bucket_acl and object_acl above + + if not path_check(src): + module.fail_json(msg="Local object for PUT does not exist") + + if bucketrtn: + keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) + else: + # If the bucket doesn't exist we should create it. 
+ # only use valid bucket acls for create_bucket function + module.params['permission'] = bucket_acl + create_bucket(module, s3, bucket, location) + + if keyrtn and overwrite != 'always': + if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj): + # Return the download URL for the existing object + get_download_url(module, s3, bucket, obj, expiry, changed=False) + + # only use valid object acls for the upload_s3file function + module.params['permission'] = object_acl + upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers) + + # Delete an object from a bucket, not the entire bucket + if mode == 'delobj': + if obj is None: + module.fail_json(msg="object parameter is required") + if bucket: + deletertn = delete_key(module, s3, bucket, obj) + if deletertn is True: + module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True) + else: + module.fail_json(msg="Bucket parameter is required.") + + # Delete an entire bucket, including all objects in the bucket + if mode == 'delete': + if bucket: + deletertn = delete_bucket(module, s3, bucket) + if deletertn is True: + module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True) + else: + module.fail_json(msg="Bucket parameter is required.") + + # Support for listing a set of keys + if mode == 'list': + exists = bucket_check(module, s3, bucket) + + # If the bucket does not exist then bail out + if not exists: + module.fail_json(msg="Target bucket (%s) cannot be found" % bucket) + + list_keys(module, s3, bucket, prefix, marker, max_keys) + + # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. + # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. + if mode == 'create': + + # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified + # these were separated above into the variables bucket_acl and object_acl + + if bucket and not obj: + if bucketrtn: + module.exit_json(msg="Bucket already exists.", changed=False) + else: + # only use valid bucket acls when creating the bucket + module.params['permission'] = bucket_acl + module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location)) + if bucket and obj: + if obj.endswith('/'): + dirobj = obj + else: + dirobj = obj + "/" + if bucketrtn: + if key_check(module, s3, bucket, dirobj): + module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False) + else: + # setting valid object acls for the create_dirkey function + module.params['permission'] = object_acl + create_dirkey(module, s3, bucket, dirobj, encrypt) + else: + # only use valid bucket acls for the create_bucket function + module.params['permission'] = bucket_acl + created = create_bucket(module, s3, bucket, location) + # only use valid object acls for the create_dirkey function + module.params['permission'] = object_acl + create_dirkey(module, s3, bucket, dirobj, encrypt) + + # Support for grabbing the time-expired URL for an object in S3/Walrus. + if mode == 'geturl': + if not bucket and not obj: + module.fail_json(msg="Bucket and Object parameters must be set") + + keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) + if keyrtn: + get_download_url(module, s3, bucket, obj, expiry) + else: + module.fail_json(msg="Key %s does not exist." 
% obj) + + if mode == 'getstr': + if bucket and obj: + keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) + if keyrtn: + try: + download_s3str(module, s3, bucket, obj, version=version) + except Sigv4Required: + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True) + download_s3str(module, s3, bucket, obj, version=version) + elif version is not None: + module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version)) + else: + module.fail_json(msg="Key %s does not exist." % obj) + + module.exit_json(failed=False) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_appserviceplan.py b/test/support/integration/plugins/modules/azure_rm_appserviceplan.py new file mode 100644 index 00000000..ee871c35 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_appserviceplan.py @@ -0,0 +1,379 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_appserviceplan +version_added: "2.7" +short_description: Manage App Service Plan +description: + - Create, update and delete instance of App Service Plan. + +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: True + + name: + description: + - Unique name of the app service plan to create or update. + required: True + + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + + sku: + description: + - The pricing tiers, e.g., C(F1), C(D1), C(B1), C(B2), C(B3), C(S1), C(P1), C(P1V2) etc. + - Please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/plans/) for more detail. + - For Linux app service plan, please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/) for more detail. + is_linux: + description: + - Describe whether to host webapp on Linux worker. + type: bool + default: false + + number_of_workers: + description: + - Describe number of workers to be allocated. + + state: + description: + - Assert the state of the app service plan. + - Use C(present) to create or update an app service plan and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - Yunge Zhu (@yungezz) + +''' + +EXAMPLES = ''' + - name: Create a windows app service plan + azure_rm_appserviceplan: + resource_group: myResourceGroup + name: myAppPlan + location: eastus + sku: S1 + + - name: Create a linux app service plan + azure_rm_appserviceplan: + resource_group: myResourceGroup + name: myAppPlan + location: eastus + sku: S1 + is_linux: true + number_of_workers: 1 + + - name: update sku of existing windows app service plan + azure_rm_appserviceplan: + resource_group: myResourceGroup + name: myAppPlan + location: eastus + sku: S2 +''' + +RETURN = ''' +azure_appserviceplan: + description: Facts about the current state of the app service plan. 
+ returned: always + type: dict + sample: { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppPlan" + } +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from msrestazure.azure_operation import AzureOperationPoller + from msrest.serialization import Model + from azure.mgmt.web.models import ( + app_service_plan, AppServicePlan, SkuDescription + ) +except ImportError: + # This is handled in azure_rm_common + pass + + +def _normalize_sku(sku): + if sku is None: + return sku + + sku = sku.upper() + if sku == 'FREE': + return 'F1' + elif sku == 'SHARED': + return 'D1' + return sku + + +def get_sku_name(tier): + tier = tier.upper() + if tier == 'F1' or tier == "FREE": + return 'FREE' + elif tier == 'D1' or tier == "SHARED": + return 'SHARED' + elif tier in ['B1', 'B2', 'B3', 'BASIC']: + return 'BASIC' + elif tier in ['S1', 'S2', 'S3']: + return 'STANDARD' + elif tier in ['P1', 'P2', 'P3']: + return 'PREMIUM' + elif tier in ['P1V2', 'P2V2', 'P3V2']: + return 'PREMIUMV2' + else: + return None + + +def appserviceplan_to_dict(plan): + return dict( + id=plan.id, + name=plan.name, + kind=plan.kind, + location=plan.location, + reserved=plan.reserved, + is_linux=plan.reserved, + provisioning_state=plan.provisioning_state, + status=plan.status, + target_worker_count=plan.target_worker_count, + sku=dict( + name=plan.sku.name, + size=plan.sku.size, + tier=plan.sku.tier, + family=plan.sku.family, + capacity=plan.sku.capacity + ), + resource_group=plan.resource_group, + number_of_sites=plan.number_of_sites, + tags=plan.tags if plan.tags else None + ) + + +class AzureRMAppServicePlans(AzureRMModuleBase): + """Configuration class for an Azure RM App Service Plan resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + sku=dict( + type='str' + ), + is_linux=dict( + type='bool', + default=False + ), + number_of_workers=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.location = None + + self.sku = None + self.is_linux = None + self.number_of_workers = 1 + + self.tags = None + + self.results = dict( + changed=False, + ansible_facts=dict(azure_appserviceplan=None) + ) + self.state = None + + super(AzureRMAppServicePlans, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if kwargs[key]: + setattr(self, key, kwargs[key]) + + old_response = None + response = None + to_be_updated = False + + # set location + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + # get app service plan + old_response = self.get_plan() + + # if not existing + if not old_response: + self.log("App Service plan doesn't exist") + + if self.state == "present": + to_be_updated = True + + if not self.sku: + self.fail('Please specify sku in plan when creation') + + else: + # existing app service plan, do update + self.log("App Service Plan already exists") + + if self.state == 'present': + 
self.log('Result: {0}'.format(old_response)) + + update_tags, newtags = self.update_tags(old_response.get('tags', dict())) + + if update_tags: + to_be_updated = True + self.tags = newtags + + # check if sku changed + if self.sku and _normalize_sku(self.sku) != old_response['sku']['size']: + to_be_updated = True + + # check if number_of_workers changed + if self.number_of_workers and int(self.number_of_workers) != old_response['sku']['capacity']: + to_be_updated = True + + if self.is_linux and self.is_linux != old_response['reserved']: + self.fail("Operation not allowed: cannot update reserved of app service plan.") + + if old_response: + self.results['id'] = old_response['id'] + + if to_be_updated: + self.log('Need to Create/Update app service plan') + self.results['changed'] = True + + if self.check_mode: + return self.results + + response = self.create_or_update_plan() + self.results['id'] = response['id'] + + if self.state == 'absent' and old_response: + self.log("Delete app service plan") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_plan() + + self.log('App service plan instance deleted') + + return self.results + + def get_plan(self): + ''' + Gets app service plan + :return: deserialized app service plan dictionary + ''' + self.log("Get App Service Plan {0}".format(self.name)) + + try: + response = self.web_client.app_service_plans.get(self.resource_group, self.name) + if response: + self.log("Response : {0}".format(response)) + self.log("App Service Plan : {0} found".format(response.name)) + + return appserviceplan_to_dict(response) + except CloudError as ex: + self.log("Didn't find app service plan {0} in resource group {1}".format(self.name, self.resource_group)) + + return False + + def create_or_update_plan(self): + ''' + Creates app service plan + :return: deserialized app service plan dictionary + ''' + self.log("Create App Service Plan {0}".format(self.name)) + + try: + # normalize sku + sku = _normalize_sku(self.sku) + + sku_def = SkuDescription(tier=get_sku_name( + sku), name=sku, capacity=self.number_of_workers) + plan_def = AppServicePlan( + location=self.location, app_service_plan_name=self.name, sku=sku_def, reserved=self.is_linux, tags=self.tags if self.tags else None) + + response = self.web_client.app_service_plans.create_or_update(self.resource_group, self.name, plan_def) + + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + self.log("Response : {0}".format(response)) + + return appserviceplan_to_dict(response) + except CloudError as ex: + self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex))) + + def delete_plan(self): + ''' + Deletes specified App service plan in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the App service plan {0}".format(self.name)) + try: + response = self.web_client.app_service_plans.delete(resource_group_name=self.resource_group, + name=self.name) + except CloudError as e: + self.log('Error attempting to delete App service plan.') + self.fail( + "Error deleting the App service plan : {0}".format(str(e))) + + return True + + +def main(): + """Main execution""" + AzureRMAppServicePlans() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_functionapp.py b/test/support/integration/plugins/modules/azure_rm_functionapp.py new file mode 100644 index 00000000..0c372a88 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_functionapp.py @@ -0,0 +1,421 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Thomas Stringer <tomstr@microsoft.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_functionapp +version_added: "2.4" +short_description: Manage Azure Function Apps +description: + - Create, update or delete an Azure Function App. +options: + resource_group: + description: + - Name of resource group. + required: true + aliases: + - resource_group_name + name: + description: + - Name of the Azure Function App. + required: true + location: + description: + - Valid Azure location. Defaults to location of the resource group. + plan: + description: + - App service plan. + - It can be name of existing app service plan in same resource group as function app. + - It can be resource id of existing app service plan. + - Resource id. For example /subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>. + - It can be a dict which contains C(name), C(resource_group). + - C(name). Name of app service plan. + - C(resource_group). Resource group name of app service plan. + version_added: "2.8" + container_settings: + description: Web app container settings. + suboptions: + name: + description: + - Name of container. For example "imagename:tag". + registry_server_url: + description: + - Container registry server url. For example C(mydockerregistry.io). + registry_server_user: + description: + - The container registry server user name. + registry_server_password: + description: + - The container registry server password. + version_added: "2.8" + storage_account: + description: + - Name of the storage account to use. + required: true + aliases: + - storage + - storage_account_name + app_settings: + description: + - Dictionary containing application settings. + state: + description: + - Assert the state of the Function App. Use C(present) to create or update a Function App and C(absent) to delete. 
+ default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - Thomas Stringer (@trstringer) +''' + +EXAMPLES = ''' +- name: Create a function app + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + storage_account: myStorageAccount + +- name: Create a function app with app settings + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + storage_account: myStorageAccount + app_settings: + setting1: value1 + setting2: value2 + +- name: Create container based function app + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + storage_account: myStorageAccount + plan: + resource_group: myResourceGroup + name: myAppPlan + container_settings: + name: httpd + registry_server_url: index.docker.io + +- name: Delete a function app + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the Azure Function App. + returned: success + type: dict + example: + id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myFunctionApp + name: myfunctionapp + kind: functionapp + location: East US + type: Microsoft.Web/sites + state: Running + host_names: + - myfunctionapp.azurewebsites.net + repository_site_name: myfunctionapp + usage_state: Normal + enabled: true + enabled_host_names: + - myfunctionapp.azurewebsites.net + - myfunctionapp.scm.azurewebsites.net + availability_state: Normal + host_name_ssl_states: + - name: myfunctionapp.azurewebsites.net + ssl_state: Disabled + host_type: Standard + - name: myfunctionapp.scm.azurewebsites.net + ssl_state: Disabled + host_type: Repository + server_farm_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/EastUSPlan + reserved: false + last_modified_time_utc: 2017-08-22T18:54:01.190Z + scm_site_also_stopped: false + client_affinity_enabled: true + client_cert_enabled: false + host_names_disabled: false + outbound_ip_addresses: ............ 
+ container_size: 1536 + daily_memory_time_quota: 0 + resource_group: myResourceGroup + default_host_name: myfunctionapp.azurewebsites.net +''' # NOQA + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.web.models import ( + site_config, app_service_plan, Site, SiteConfig, NameValuePair, SiteSourceControl, + AppServicePlan, SkuDescription + ) + from azure.mgmt.resource.resources import ResourceManagementClient + from msrest.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + +container_settings_spec = dict( + name=dict(type='str', required=True), + registry_server_url=dict(type='str'), + registry_server_user=dict(type='str'), + registry_server_password=dict(type='str', no_log=True) +) + + +class AzureRMFunctionApp(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True, aliases=['resource_group_name']), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + storage_account=dict( + type='str', + aliases=['storage', 'storage_account_name'] + ), + app_settings=dict(type='dict'), + plan=dict( + type='raw' + ), + container_settings=dict( + type='dict', + options=container_settings_spec + ) + ) + + self.results = dict( + changed=False, + state=dict() + ) + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.storage_account = None + self.app_settings = None + self.plan = None + self.container_settings = None + + required_if = [('state', 'present', ['storage_account'])] + + super(AzureRMFunctionApp, self).__init__( + self.module_arg_spec, + supports_check_mode=True, + required_if=required_if + ) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + if self.app_settings is None: + self.app_settings = dict() + + try: + resource_group = self.rm_client.resource_groups.get(self.resource_group) + except CloudError: + self.fail('Unable to retrieve resource group') + + self.location = self.location or resource_group.location + + try: + function_app = self.web_client.web_apps.get( + resource_group_name=self.resource_group, + name=self.name + ) + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError + exists = function_app is not None + except CloudError as exc: + exists = False + + if self.state == 'absent': + if exists: + if self.check_mode: + self.results['changed'] = True + return self.results + try: + self.web_client.web_apps.delete( + resource_group_name=self.resource_group, + name=self.name + ) + self.results['changed'] = True + except CloudError as exc: + self.fail('Failure while deleting web app: {0}'.format(exc)) + else: + self.results['changed'] = False + else: + kind = 'functionapp' + linux_fx_version = None + if self.container_settings and self.container_settings.get('name'): + kind = 'functionapp,linux,container' + linux_fx_version = 'DOCKER|' + if self.container_settings.get('registry_server_url'): + self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url'] + linux_fx_version += self.container_settings['registry_server_url'] + '/' + linux_fx_version += self.container_settings['name'] + if self.container_settings.get('registry_server_user'): + self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = 
self.container_settings.get('registry_server_user') + + if self.container_settings.get('registry_server_password'): + self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings.get('registry_server_password') + + if not self.plan and function_app: + self.plan = function_app.server_farm_id + + if not exists: + function_app = Site( + location=self.location, + kind=kind, + site_config=SiteConfig( + app_settings=self.aggregated_app_settings(), + scm_type='LocalGit' + ) + ) + self.results['changed'] = True + else: + self.results['changed'], function_app = self.update(function_app) + + # get app service plan + if self.plan: + if isinstance(self.plan, dict): + self.plan = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Web/serverfarms/{2}".format( + self.subscription_id, + self.plan.get('resource_group', self.resource_group), + self.plan.get('name') + ) + function_app.server_farm_id = self.plan + + # set linux fx version + if linux_fx_version: + function_app.site_config.linux_fx_version = linux_fx_version + + if self.check_mode: + self.results['state'] = function_app.as_dict() + elif self.results['changed']: + try: + new_function_app = self.web_client.web_apps.create_or_update( + resource_group_name=self.resource_group, + name=self.name, + site_envelope=function_app + ).result() + self.results['state'] = new_function_app.as_dict() + except CloudError as exc: + self.fail('Error creating or updating web app: {0}'.format(exc)) + + return self.results + + def update(self, source_function_app): + """Update the Site object if there are any changes""" + + source_app_settings = self.web_client.web_apps.list_application_settings( + resource_group_name=self.resource_group, + name=self.name + ) + + changed, target_app_settings = self.update_app_settings(source_app_settings.properties) + + source_function_app.site_config = SiteConfig( + app_settings=target_app_settings, + scm_type='LocalGit' + ) + + return changed, source_function_app + + def update_app_settings(self, source_app_settings): + """Update app settings""" + + target_app_settings = self.aggregated_app_settings() + target_app_settings_dict = dict([(i.name, i.value) for i in target_app_settings]) + return target_app_settings_dict != source_app_settings, target_app_settings + + def necessary_functionapp_settings(self): + """Construct the necessary app settings required for an Azure Function App""" + + function_app_settings = [] + + if self.container_settings is None: + for key in ['AzureWebJobsStorage', 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING', 'AzureWebJobsDashboard']: + function_app_settings.append(NameValuePair(name=key, value=self.storage_connection_string)) + function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~1')) + function_app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='6.5.0')) + function_app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=self.name)) + else: + function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2')) + function_app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE', value=False)) + function_app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=self.storage_connection_string)) + + return function_app_settings + + def aggregated_app_settings(self): + """Combine both system and user app settings""" + + function_app_settings = self.necessary_functionapp_settings() + for app_setting_key in self.app_settings: + found_setting = None + for s in 
function_app_settings: + if s.name == app_setting_key: + found_setting = s + break + if found_setting: + found_setting.value = self.app_settings[app_setting_key] + else: + function_app_settings.append(NameValuePair( + name=app_setting_key, + value=self.app_settings[app_setting_key] + )) + return function_app_settings + + @property + def storage_connection_string(self): + """Construct the storage account connection string""" + + return 'DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}'.format( + self.storage_account, + self.storage_key + ) + + @property + def storage_key(self): + """Retrieve the storage account key""" + + return self.storage_client.storage_accounts.list_keys( + resource_group_name=self.resource_group, + account_name=self.storage_account + ).keys[0].value + + +def main(): + """Main function execution""" + + AzureRMFunctionApp() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_functionapp_info.py b/test/support/integration/plugins/modules/azure_rm_functionapp_info.py new file mode 100644 index 00000000..40672f95 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_functionapp_info.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Thomas Stringer, <tomstr@microsoft.com> + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_functionapp_info +version_added: "2.9" +short_description: Get Azure Function App facts +description: + - Get facts for one Azure Function App or all Function Apps within a resource group. +options: + name: + description: + - Only show results for a specific Function App. + resource_group: + description: + - Limit results to a resource group. Required when filtering by name. + aliases: + - resource_group_name + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + +extends_documentation_fragment: + - azure + +author: + - Thomas Stringer (@trstringer) +''' + +EXAMPLES = ''' + - name: Get facts for one Function App + azure_rm_functionapp_info: + resource_group: myResourceGroup + name: myfunctionapp + + - name: Get facts for all Function Apps in a resource group + azure_rm_functionapp_info: + resource_group: myResourceGroup + + - name: Get facts for all Function Apps by tags + azure_rm_functionapp_info: + tags: + - testing +''' + +RETURN = ''' +azure_functionapps: + description: + - List of Azure Function Apps dicts. 
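+    # each entry is the Web SDK Site model serialized via as_dict(), so keys match the 'state' output of azure_rm_functionapp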
+ returned: always + type: list + example: + id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/sites/myfunctionapp + name: myfunctionapp + kind: functionapp + location: East US + type: Microsoft.Web/sites + state: Running + host_names: + - myfunctionapp.azurewebsites.net + repository_site_name: myfunctionapp + usage_state: Normal + enabled: true + enabled_host_names: + - myfunctionapp.azurewebsites.net + - myfunctionapp.scm.azurewebsites.net + availability_state: Normal + host_name_ssl_states: + - name: myfunctionapp.azurewebsites.net + ssl_state: Disabled + host_type: Standard + - name: myfunctionapp.scm.azurewebsites.net + ssl_state: Disabled + host_type: Repository + server_farm_id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/serverfarms/EastUSPlan + reserved: false + last_modified_time_utc: 2017-08-22T18:54:01.190Z + scm_site_also_stopped: false + client_affinity_enabled: true + client_cert_enabled: false + host_names_disabled: false + outbound_ip_addresses: ............ + container_size: 1536 + daily_memory_time_quota: 0 + resource_group: myResourceGroup + default_host_name: myfunctionapp.azurewebsites.net +''' + +try: + from msrestazure.azure_exceptions import CloudError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMFunctionAppInfo(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str', aliases=['resource_group_name']), + tags=dict(type='list'), + ) + + self.results = dict( + changed=False, + ansible_info=dict(azure_functionapps=[]) + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMFunctionAppInfo, self).__init__( + self.module_arg_spec, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_functionapp_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_functionapp_facts' module has been renamed to 'azure_rm_functionapp_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + + if self.name: + self.results['ansible_info']['azure_functionapps'] = self.get_functionapp() + elif self.resource_group: + self.results['ansible_info']['azure_functionapps'] = self.list_resource_group() + else: + self.results['ansible_info']['azure_functionapps'] = self.list_all() + + return self.results + + def get_functionapp(self): + self.log('Get properties for Function App {0}'.format(self.name)) + function_app = None + result = [] + + try: + function_app = self.web_client.web_apps.get( + self.resource_group, + self.name + ) + except CloudError: + pass + + if function_app and self.has_tags(function_app.tags, self.tags): + result = function_app.as_dict() + + return [result] + + def list_resource_group(self): + self.log('List items') + try: + response = self.web_client.web_apps.list_by_resource_group(self.resource_group) + except Exception as exc: + self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item.as_dict()) + return results + + def list_all(self): + self.log('List all items') + try: + response = 
self.web_client.web_apps.list()
+        except Exception as exc:
+            self.fail("Error listing all items - {0}".format(str(exc)))
+
+        results = []
+        for item in response:
+            if self.has_tags(item.tags, self.tags):
+                results.append(item.as_dict())
+        return results
+
+
+def main():
+    AzureRMFunctionAppInfo()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py
new file mode 100644
index 00000000..212cf795
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbconfiguration
+version_added: "2.8"
+short_description: Manage MariaDB Configuration instance
+description:
+    - Create, update and delete instance of MariaDB Configuration.
+
+options:
+    resource_group:
+        description:
+            - The name of the resource group that contains the resource.
+        required: True
+    server_name:
+        description:
+            - The name of the server.
+        required: True
+    name:
+        description:
+            - The name of the server configuration.
+        required: True
+    value:
+        description:
+            - Value of the configuration.
+    state:
+        description:
+            - Assert the state of the MariaDB configuration. Use C(present) to update setting, or C(absent) to reset to default value.
+        default: present
+        choices:
+            - absent
+            - present
+
+extends_documentation_fragment:
+    - azure
+
+author:
+    - Zim Kalinowski (@zikalino)
+    - Matti Ranta (@techknowlogick)
+'''
+
+EXAMPLES = '''
+  - name: Update MariaDB Server setting
+    azure_rm_mariadbconfiguration:
+      resource_group: myResourceGroup
+      server_name: myServer
+      name: event_scheduler
+      value: "ON"
+'''
+
+RETURN = '''
+id:
+    description:
+        - Resource ID.
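+    # note: the sample ID below is split across two source lines; the actual value is one unbroken string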
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myServer/confi + gurations/event_scheduler" +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from azure.mgmt.rdbms.mysql import MariaDBManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMariaDbConfiguration(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + value=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.value = None + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMMariaDbConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + + old_response = None + response = None + + old_response = self.get_configuration() + + if not old_response: + self.log("Configuration instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("Configuration instance already exists") + if self.state == 'absent' and old_response['source'] == 'user-override': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if Configuration instance has to be deleted or may be updated") + if self.value != old_response.get('value'): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the Configuration instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_configuration() + + self.results['changed'] = True + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("Configuration instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_configuration() + else: + self.log("Configuration instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_configuration(self): + self.log("Creating / Updating the Configuration instance {0}".format(self.name)) + + try: + response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + configuration_name=self.name, + value=self.value, + source='user-override') + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except CloudError as exc: + self.log('Error attempting to create the Configuration instance.') + self.fail("Error creating the Configuration instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_configuration(self): + 
self.log("Deleting the Configuration instance {0}".format(self.name)) + try: + response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + configuration_name=self.name, + source='system-default') + except CloudError as e: + self.log('Error attempting to delete the Configuration instance.') + self.fail("Error deleting the Configuration instance: {0}".format(str(e))) + + return True + + def get_configuration(self): + self.log("Checking if the Configuration instance {0} is present".format(self.name)) + found = False + try: + response = self.mariadb_client.configurations.get(resource_group_name=self.resource_group, + server_name=self.server_name, + configuration_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("Configuration instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the Configuration instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMariaDbConfiguration() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py new file mode 100644 index 00000000..3faac5eb --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbconfiguration_info +version_added: "2.9" +short_description: Get Azure MariaDB Configuration facts +description: + - Get facts of Azure MariaDB Configuration. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - Setting name. + type: str + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get specific setting of MariaDB Server + azure_rm_mariadbconfiguration_info: + resource_group: myResourceGroup + server_name: testserver + name: deadlock_timeout + + - name: Get all settings of MariaDB Server + azure_rm_mariadbconfiguration_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +settings: + description: + - A list of dictionaries containing MariaDB Server settings. + returned: always + type: complex + contains: + id: + description: + - Setting resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver + /configurations/deadlock_timeout" + name: + description: + - Setting name. + returned: always + type: str + sample: deadlock_timeout + value: + description: + - Setting value. 
+ returned: always + type: raw + sample: 1000 + description: + description: + - Description of the configuration. + returned: always + type: str + sample: Deadlock timeout. + source: + description: + - Source of the configuration. + returned: always + type: str + sample: system-default +''' + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMariaDbConfigurationInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict(changed=False) + self.mgmt_client = None + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMMariaDbConfigurationInfo, self).__init__(self.module_arg_spec, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mariadbconfiguration_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mariadbconfiguration_facts' module has been renamed to 'azure_rm_mariadbconfiguration_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.name is not None: + self.results['settings'] = self.get() + else: + self.results['settings'] = self.list_by_server() + return self.results + + def get(self): + ''' + Gets facts of the specified MariaDB Configuration. + + :return: deserialized MariaDB Configurationinstance state dictionary + ''' + response = None + results = [] + try: + response = self.mgmt_client.configurations.get(resource_group_name=self.resource_group, + server_name=self.server_name, + configuration_name=self.name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for Configurations.') + + if response is not None: + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + ''' + Gets facts of the specified MariaDB Configuration. 
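+        Lists every configuration value of the given server.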
+ + :return: deserialized MariaDB Configurationinstance state dictionary + ''' + response = None + results = [] + try: + response = self.mgmt_client.configurations.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for Configurations.') + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'server_name': self.server_name, + 'id': d['id'], + 'name': d['name'], + 'value': d['value'], + 'description': d['description'], + 'source': d['source'] + } + return d + + +def main(): + AzureRMMariaDbConfigurationInfo() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py new file mode 100644 index 00000000..8492b968 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py @@ -0,0 +1,304 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com> +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbdatabase +version_added: "2.8" +short_description: Manage MariaDB Database instance +description: + - Create, update and delete instance of MariaDB Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the database. + required: True + charset: + description: + - The charset of the database. Check MariaDB documentation for possible values. + - This is only set on creation, use I(force_update) to recreate a database if the values don't match. + collation: + description: + - The collation of the database. Check MariaDB documentation for possible values. + - This is only set on creation, use I(force_update) to recreate a database if the values don't match. + force_update: + description: + - When set to C(true), will delete and recreate the existing MariaDB database if any of the properties don't match what is set. + - When set to C(false), no change will occur to the database even if any of the properties do not match. + type: bool + default: 'no' + state: + description: + - Assert the state of the MariaDB Database. Use C(present) to create or update a database and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Create (or update) MariaDB Database + azure_rm_mariadbdatabase: + resource_group: myResourceGroup + server_name: testserver + name: db1 +''' + +RETURN = ''' +id: + description: + - Resource ID. 
+ returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/databases/db1 +name: + description: + - Resource name. + returned: always + type: str + sample: db1 +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMariaDbDatabase(AzureRMModuleBase): + """Configuration class for an Azure RM MariaDB Database resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + charset=dict( + type='str' + ), + collation=dict( + type='str' + ), + force_update=dict( + type='bool', + default=False + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.force_update = None + self.parameters = dict() + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMMariaDbDatabase, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "charset": + self.parameters["charset"] = kwargs[key] + elif key == "collation": + self.parameters["collation"] = kwargs[key] + + old_response = None + response = None + + self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_mariadbdatabase() + + if not old_response: + self.log("MariaDB Database instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("MariaDB Database instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if MariaDB Database instance has to be deleted or may be updated") + if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']): + self.to_do = Actions.Update + if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']): + self.to_do = Actions.Update + if self.to_do == Actions.Update: + if self.force_update: + if not self.check_mode: + self.delete_mariadbdatabase() + else: + self.fail("Database properties cannot be updated without setting 'force_update' option") + self.to_do = Actions.NoAction + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the MariaDB Database instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_mariadbdatabase() + self.results['changed'] = True + self.log("Creation 
/ Update done") + elif self.to_do == Actions.Delete: + self.log("MariaDB Database instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_mariadbdatabase() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_mariadbdatabase(): + time.sleep(20) + else: + self.log("MariaDB Database instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + self.results["name"] = response["name"] + + return self.results + + def create_update_mariadbdatabase(self): + ''' + Creates or updates MariaDB Database with the specified configuration. + + :return: deserialized MariaDB Database instance state dictionary + ''' + self.log("Creating / Updating the MariaDB Database instance {0}".format(self.name)) + + try: + response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except CloudError as exc: + self.log('Error attempting to create the MariaDB Database instance.') + self.fail("Error creating the MariaDB Database instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_mariadbdatabase(self): + ''' + Deletes specified MariaDB Database instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the MariaDB Database instance {0}".format(self.name)) + try: + response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + except CloudError as e: + self.log('Error attempting to delete the MariaDB Database instance.') + self.fail("Error deleting the MariaDB Database instance: {0}".format(str(e))) + + return True + + def get_mariadbdatabase(self): + ''' + Gets the properties of the specified MariaDB Database. 
+ + :return: deserialized MariaDB Database instance state dictionary + ''' + self.log("Checking if the MariaDB Database instance {0} is present".format(self.name)) + found = False + try: + response = self.mgmt_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MariaDB Database instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the MariaDB Database instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMariaDbDatabase() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py new file mode 100644 index 00000000..e9c99c14 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com> +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbdatabase_info +version_added: "2.9" +short_description: Get Azure MariaDB Database facts +description: + - Get facts of MariaDB Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - The name of the database. + type: str + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get instance of MariaDB Database + azure_rm_mariadbdatabase_info: + resource_group: myResourceGroup + server_name: server_name + name: database_name + + - name: List instances of MariaDB Database + azure_rm_mariadbdatabase_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +databases: + description: + - A list of dictionaries containing facts for MariaDB Databases. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser + ver/databases/db1" + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: testrg + server_name: + description: + - Server name. + returned: always + type: str + sample: testserver + name: + description: + - Resource name. + returned: always + type: str + sample: db1 + charset: + description: + - The charset of the database. + returned: always + type: str + sample: UTF8 + collation: + description: + - The collation of the database. 
+ returned: always + type: str + sample: English_United States.1252 +''' + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.resource_group is not None and + self.server_name is not None and + self.name is not None): + self.results['databases'] = self.get() + elif (self.resource_group is not None and + self.server_name is not None): + self.results['databases'] = self.list_by_server() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mariadb_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for Databases.') + + if response is not None: + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + response = None + results = [] + try: + response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e))) + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'server_name': self.server_name, + 'name': d['name'], + 'charset': d['charset'], + 'collation': d['collation'] + } + return d + + +def main(): + AzureRMMariaDbDatabaseInfo() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py new file mode 100644 index 00000000..1fc8c5e7 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py @@ -0,0 +1,277 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com> +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 
'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbfirewallrule +version_added: "2.8" +short_description: Manage MariaDB firewall rule instance +description: + - Create, update and delete instance of MariaDB firewall rule. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the MariaDB firewall rule. + required: True + start_ip_address: + description: + - The start IP address of the MariaDB firewall rule. Must be IPv4 format. + end_ip_address: + description: + - The end IP address of the MariaDB firewall rule. Must be IPv4 format. + state: + description: + - Assert the state of the MariaDB firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Create (or update) MariaDB firewall rule + azure_rm_mariadbfirewallrule: + resource_group: myResourceGroup + server_name: testserver + name: rule1 + start_ip_address: 10.0.0.17 + end_ip_address: 10.0.0.20 +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire + wallRules/rule1" +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMariaDbFirewallRule(AzureRMModuleBase): + """Configuration class for an Azure RM MariaDB firewall rule resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + start_ip_address=dict( + type='str' + ), + end_ip_address=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.start_ip_address = None + self.end_ip_address = None + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMMariaDbFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_firewallrule() + + if not old_response: + self.log("MariaDB firewall rule instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("MariaDB firewall rule instance already 
exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if MariaDB firewall rule instance has to be deleted or may be updated") + if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']): + self.to_do = Actions.Update + if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the MariaDB firewall rule instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_firewallrule() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("MariaDB firewall rule instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_firewallrule() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_firewallrule(): + time.sleep(20) + else: + self.log("MariaDB firewall rule instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_firewallrule(self): + ''' + Creates or updates MariaDB firewall rule with the specified configuration. + + :return: deserialized MariaDB firewall rule instance state dictionary + ''' + self.log("Creating / Updating the MariaDB firewall rule instance {0}".format(self.name)) + + try: + response = self.mariadb_client.firewall_rules.create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name, + start_ip_address=self.start_ip_address, + end_ip_address=self.end_ip_address) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except CloudError as exc: + self.log('Error attempting to create the MariaDB firewall rule instance.') + self.fail("Error creating the MariaDB firewall rule instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_firewallrule(self): + ''' + Deletes specified MariaDB firewall rule instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the MariaDB firewall rule instance {0}".format(self.name)) + try: + response = self.mariadb_client.firewall_rules.delete(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + except CloudError as e: + self.log('Error attempting to delete the MariaDB firewall rule instance.') + self.fail("Error deleting the MariaDB firewall rule instance: {0}".format(str(e))) + + return True + + def get_firewallrule(self): + ''' + Gets the properties of the specified MariaDB firewall rule. 
+ + :return: deserialized MariaDB firewall rule instance state dictionary + ''' + self.log("Checking if the MariaDB firewall rule instance {0} is present".format(self.name)) + found = False + try: + response = self.mariadb_client.firewall_rules.get(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MariaDB firewall rule instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the MariaDB firewall rule instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMariaDbFirewallRule() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py new file mode 100644 index 00000000..ef71be8d --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com> +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbfirewallrule_info +version_added: "2.9" +short_description: Get Azure MariaDB Firewall Rule facts +description: + - Get facts of Azure MariaDB Firewall Rule. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - The name of the server firewall rule. + type: str + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get instance of MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_info: + resource_group: myResourceGroup + server_name: server_name + name: firewall_rule_name + + - name: List instances of MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +rules: + description: + - A list of dictionaries containing facts for MariaDB Firewall Rule. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire + wallRules/rule1" + server_name: + description: + - The name of the server. + returned: always + type: str + sample: testserver + name: + description: + - Resource name. + returned: always + type: str + sample: rule1 + start_ip_address: + description: + - The start IP address of the MariaDB firewall rule. + returned: always + type: str + sample: 10.0.0.16 + end_ip_address: + description: + - The end IP address of the MariaDB firewall rule. 
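+            # a rule covering a single address uses the same start and end IP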
+ returned: always + type: str + sample: 10.0.0.18 +''' + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMariaDbFirewallRuleInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.mgmt_client = None + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMMariaDbFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mariadbfirewallrule_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mariadbfirewallrule_facts' module has been renamed to 'azure_rm_mariadbfirewallrule_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if (self.name is not None): + self.results['rules'] = self.get() + else: + self.results['rules'] = self.list_by_server() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mgmt_client.firewall_rules.get(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for FirewallRules.') + + if response is not None: + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + response = None + results = [] + try: + response = self.mgmt_client.firewall_rules.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for FirewallRules.') + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'id': d['id'], + 'server_name': self.server_name, + 'name': d['name'], + 'start_ip_address': d['start_ip_address'], + 'end_ip_address': d['end_ip_address'] + } + return d + + +def main(): + AzureRMMariaDbFirewallRuleInfo() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbserver.py b/test/support/integration/plugins/modules/azure_rm_mariadbserver.py new file mode 100644 index 00000000..30a29988 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbserver.py @@ -0,0 +1,388 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com> +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + 
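+# All Azure calls in this module go through the mariadb_client property
+# supplied by AzureRMModuleBase; the SDK imports below are guarded because
+# missing client libraries are detected and reported by azure_rm_common.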
+ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbserver +version_added: "2.8" +short_description: Manage MariaDB Server instance +description: + - Create, update and delete instance of MariaDB Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + name: + description: + - The name of the server. + required: True + sku: + description: + - The SKU (pricing tier) of the server. + suboptions: + name: + description: + - The name of the SKU, typically, tier + family + cores, for example C(B_Gen4_1), C(GP_Gen5_8). + tier: + description: + - The tier of the particular SKU, for example C(Basic). + choices: + - basic + - standard + capacity: + description: + - The scale up/out capacity, representing server's compute units. + type: int + size: + description: + - The size code, to be interpreted by resource as appropriate. + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + storage_mb: + description: + - The maximum storage allowed for a server. + type: int + version: + description: + - Server version. + choices: + - 10.2 + enforce_ssl: + description: + - Enable SSL enforcement. + type: bool + default: False + admin_username: + description: + - The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation). + admin_password: + description: + - The password of the administrator login. + create_mode: + description: + - Create mode of SQL Server. + default: Default + state: + description: + - Assert the state of the MariaDB Server. Use C(present) to create or update a server and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Create (or update) MariaDB Server + azure_rm_mariadbserver: + resource_group: myResourceGroup + name: testserver + sku: + name: B_Gen5_1 + tier: Basic + location: eastus + storage_mb: 1024 + enforce_ssl: True + version: 10.2 + admin_username: cloudsa + admin_password: password +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/mariadbsrv1b6dd89593 +version: + description: + - Server version. Possible values include C(10.2). + returned: always + type: str + sample: 10.2 +state: + description: + - A state of a server that is visible to user. Possible values include C(Ready), C(Dropping), C(Disabled). + returned: always + type: str + sample: Ready +fully_qualified_domain_name: + description: + - The fully qualified domain name of a server. 
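+    # clients connect to this host name; Azure Database for MariaDB listens on port 3306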
+ returned: always + type: str + sample: mariadbsrv1b6dd89593.mariadb.database.azure.com +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMariaDbServers(AzureRMModuleBase): + """Configuration class for an Azure RM MariaDB Server resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + sku=dict( + type='dict' + ), + location=dict( + type='str' + ), + storage_mb=dict( + type='int' + ), + version=dict( + type='str', + choices=['10.2'] + ), + enforce_ssl=dict( + type='bool', + default=False + ), + create_mode=dict( + type='str', + default='Default' + ), + admin_username=dict( + type='str' + ), + admin_password=dict( + type='str', + no_log=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.parameters = dict() + self.tags = None + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMMariaDbServers, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "sku": + ev = kwargs[key] + if 'tier' in ev: + if ev['tier'] == 'basic': + ev['tier'] = 'Basic' + elif ev['tier'] == 'standard': + ev['tier'] = 'Standard' + self.parameters["sku"] = ev + elif key == "location": + self.parameters["location"] = kwargs[key] + elif key == "storage_mb": + self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})["storage_mb"] = kwargs[key] + elif key == "version": + self.parameters.setdefault("properties", {})["version"] = kwargs[key] + elif key == "enforce_ssl": + self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled' + elif key == "create_mode": + self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key] + elif key == "admin_username": + self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key] + elif key == "admin_password": + self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key] + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + if "location" not in self.parameters: + self.parameters["location"] = resource_group.location + + old_response = self.get_mariadbserver() + + if not old_response: + self.log("MariaDB Server instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("MariaDB Server instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if MariaDB Server instance has to be deleted or may be updated") + update_tags, newtags = self.update_tags(old_response.get('tags', {})) + if 
update_tags:
+                    self.tags = newtags
+                    self.to_do = Actions.Update
+
+        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
+            self.log("Need to Create / Update the MariaDB Server instance")
+
+            if self.check_mode:
+                self.results['changed'] = True
+                return self.results
+
+            response = self.create_update_mariadbserver()
+
+            if not old_response:
+                self.results['changed'] = True
+            else:
+                self.results['changed'] = (old_response != response)
+            self.log("Creation / Update done")
+        elif self.to_do == Actions.Delete:
+            self.log("Need to delete the MariaDB Server instance")
+            self.results['changed'] = True
+
+            if self.check_mode:
+                return self.results
+
+            self.delete_mariadbserver()
+            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
+            # for some time after deletion -- this should be really fixed in Azure
+            while self.get_mariadbserver():
+                time.sleep(20)
+        else:
+            self.log("MariaDB Server instance unchanged")
+            self.results['changed'] = False
+            response = old_response
+
+        if response:
+            self.results["id"] = response["id"]
+            self.results["version"] = response["version"]
+            self.results["state"] = response["user_visible_state"]
+            self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"]
+
+        return self.results
+
+    def create_update_mariadbserver(self):
+        '''
+        Creates or updates MariaDB Server with the specified configuration.
+
+        :return: deserialized MariaDB Server instance state dictionary
+        '''
+        self.log("Creating / Updating the MariaDB Server instance {0}".format(self.name))
+
+        try:
+            self.parameters['tags'] = self.tags
+            if self.to_do == Actions.Create:
+                response = self.mariadb_client.servers.create(resource_group_name=self.resource_group,
+                                                              server_name=self.name,
+                                                              parameters=self.parameters)
+            else:
+                # the update API expects a flattened structure: merge the "properties" dict into the top level
+                self.parameters.update(self.parameters.pop("properties", {}))
+                response = self.mariadb_client.servers.update(resource_group_name=self.resource_group,
+                                                              server_name=self.name,
+                                                              parameters=self.parameters)
+            if isinstance(response, LROPoller):
+                response = self.get_poller_result(response)
+
+        except CloudError as exc:
+            self.log('Error attempting to create the MariaDB Server instance.')
+            self.fail("Error creating the MariaDB Server instance: {0}".format(str(exc)))
+        return response.as_dict()
+
+    def delete_mariadbserver(self):
+        '''
+        Deletes specified MariaDB Server instance in the specified subscription and resource group.
+
+        :return: True
+        '''
+        self.log("Deleting the MariaDB Server instance {0}".format(self.name))
+        try:
+            response = self.mariadb_client.servers.delete(resource_group_name=self.resource_group,
+                                                          server_name=self.name)
+        except CloudError as e:
+            self.log('Error attempting to delete the MariaDB Server instance.')
+            self.fail("Error deleting the MariaDB Server instance: {0}".format(str(e)))
+
+        return True
+
+    def get_mariadbserver(self):
+        '''
+        Gets the properties of the specified MariaDB Server.
+ + :return: deserialized MariaDB Server instance state dictionary + ''' + self.log("Checking if the MariaDB Server instance {0} is present".format(self.name)) + found = False + try: + response = self.mariadb_client.servers.get(resource_group_name=self.resource_group, + server_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MariaDB Server instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the MariaDB Server instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMariaDbServers() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py new file mode 100644 index 00000000..464aa4d8 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com> +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbserver_info +version_added: "2.9" +short_description: Get Azure MariaDB Server facts +description: + - Get facts of MariaDB Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + name: + description: + - The name of the server. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get instance of MariaDB Server + azure_rm_mariadbserver_info: + resource_group: myResourceGroup + name: server_name + + - name: List instances of MariaDB Server + azure_rm_mariadbserver_info: + resource_group: myResourceGroup +''' + +RETURN = ''' +servers: + description: + - A list of dictionaries containing facts for MariaDB servers. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myabdud1223 + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: myResourceGroup + name: + description: + - Resource name. + returned: always + type: str + sample: myabdud1223 + location: + description: + - The location the resource resides in. + returned: always + type: str + sample: eastus + sku: + description: + - The SKU of the server. + returned: always + type: complex + contains: + name: + description: + - The name of the SKU. + returned: always + type: str + sample: GP_Gen4_2 + tier: + description: + - The tier of the particular SKU. + returned: always + type: str + sample: GeneralPurpose + capacity: + description: + - The scale capacity. 
+ returned: always + type: int + sample: 2 + storage_mb: + description: + - The maximum storage allowed for a server. + returned: always + type: int + sample: 128000 + enforce_ssl: + description: + - Enable SSL enforcement. + returned: always + type: bool + sample: False + admin_username: + description: + - The administrator's login name of a server. + returned: always + type: str + sample: serveradmin + version: + description: + - Server version. + returned: always + type: str + sample: "9.6" + user_visible_state: + description: + - A state of a server that is visible to user. + returned: always + type: str + sample: Ready + fully_qualified_domain_name: + description: + - The fully qualified domain name of a server. + returned: always + type: str + sample: myabdud1223.mys.database.azure.com + tags: + description: + - Tags assigned to the resource. Dictionary of string:string pairs. + type: dict + sample: { tag1: abc } +''' + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMariaDbServerInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + tags=dict( + type='list' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.name = None + self.tags = None + super(AzureRMMariaDbServerInfo, self).__init__(self.module_arg_spec, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mariadbserver_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mariadbserver_facts' module has been renamed to 'azure_rm_mariadbserver_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.resource_group is not None and + self.name is not None): + self.results['servers'] = self.get() + elif (self.resource_group is not None): + self.results['servers'] = self.list_by_resource_group() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mariadb_client.servers.get(resource_group_name=self.resource_group, + server_name=self.name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for MariaDB Server.') + + if response and self.has_tags(response.tags, self.tags): + results.append(self.format_item(response)) + + return results + + def list_by_resource_group(self): + response = None + results = [] + try: + response = self.mariadb_client.servers.list_by_resource_group(resource_group_name=self.resource_group) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for MariaDB Servers.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'id': d['id'], + 'resource_group': self.resource_group, + 'name': d['name'], + 'sku': d['sku'], + 'location': d['location'], + 'storage_mb': d['storage_profile']['storage_mb'], + 'version': d['version'], + 'enforce_ssl': (d['ssl_enforcement'] == 
'Enabled'),
+            'admin_username': d['administrator_login'],
+            'user_visible_state': d['user_visible_state'],
+            'fully_qualified_domain_name': d['fully_qualified_domain_name'],
+            'tags': d.get('tags')
+        }
+
+        return d
+
+
+def main():
+    AzureRMMariaDbServerInfo()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_resource.py b/test/support/integration/plugins/modules/azure_rm_resource.py
new file mode 100644
index 00000000..6ea3e3bb
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_resource.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_resource
+version_added: "2.6"
+short_description: Create any Azure resource
+description:
+    - Create, update or delete any Azure resource using Azure REST API.
+    - This module gives access to resources that are not supported via Ansible modules.
+    - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API.
+
+options:
+    url:
+        description:
+            - Azure RM Resource URL.
+    api_version:
+        description:
+            - Specific API version to be used.
+    provider:
+        description:
+            - Provider type.
+            - Required if URL is not specified.
+    resource_group:
+        description:
+            - Resource group to be used.
+            - Required if URL is not specified.
+    resource_type:
+        description:
+            - Resource type.
+            - Required if URL is not specified.
+    resource_name:
+        description:
+            - Resource name.
+            - Required if URL is not specified.
+    subresource:
+        description:
+            - List of subresources.
+        suboptions:
+            namespace:
+                description:
+                    - Subresource namespace.
+            type:
+                description:
+                    - Subresource type.
+            name:
+                description:
+                    - Subresource name.
+    body:
+        description:
+            - The body of the HTTP request/response to the web service.
+    method:
+        description:
+            - The HTTP method of the request or response. It must be uppercase.
+        choices:
+            - GET
+            - PUT
+            - POST
+            - HEAD
+            - PATCH
+            - DELETE
+            - MERGE
+        default: "PUT"
+    status_code:
+        description:
+            - A valid, numeric, HTTP status code that signifies success of the request. Can also be a comma-separated list of status codes.
+        type: list
+        default: [ 200, 201, 202 ]
+    idempotency:
+        description:
+            - If enabled, idempotency check will be done by using I(method=GET) first and then comparing with I(body).
+        default: no
+        type: bool
+    polling_timeout:
+        description:
+            - Timeout in seconds to wait for a long-running operation to complete; C(0) disables polling.
+        default: 0
+        type: int
+        version_added: "2.8"
+    polling_interval:
+        description:
+            - Interval in seconds between polls of a long-running operation.
+        default: 60
+        type: int
+        version_added: "2.8"
+    state:
+        description:
+            - Assert the state of the resource. Use C(present) to create or update a resource, or C(absent) to delete a resource.
+ default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Update scaleset info using azure_rm_resource + azure_rm_resource: + resource_group: myResourceGroup + provider: compute + resource_type: virtualmachinescalesets + resource_name: myVmss + api_version: "2017-12-01" + body: { body } +''' + +RETURN = ''' +response: + description: + - Response specific to resource type. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + type: str + returned: always + sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Storage/storageAccounts/staccb57dc95183" + kind: + description: + - The kind of storage. + type: str + returned: always + sample: Storage + location: + description: + - The resource location, defaults to location of the resource group. + type: str + returned: always + sample: eastus + name: + description: + The storage account name. + type: str + returned: always + sample: staccb57dc95183 + properties: + description: + - The storage account's related properties. + type: dict + returned: always + sample: { + "creationTime": "2019-06-13T06:34:33.0996676Z", + "encryption": { + "keySource": "Microsoft.Storage", + "services": { + "blob": { + "enabled": true, + "lastEnabledTime": "2019-06-13T06:34:33.1934074Z" + }, + "file": { + "enabled": true, + "lastEnabledTime": "2019-06-13T06:34:33.1934074Z" + } + } + }, + "networkAcls": { + "bypass": "AzureServices", + "defaultAction": "Allow", + "ipRules": [], + "virtualNetworkRules": [] + }, + "primaryEndpoints": { + "blob": "https://staccb57dc95183.blob.core.windows.net/", + "file": "https://staccb57dc95183.file.core.windows.net/", + "queue": "https://staccb57dc95183.queue.core.windows.net/", + "table": "https://staccb57dc95183.table.core.windows.net/" + }, + "primaryLocation": "eastus", + "provisioningState": "Succeeded", + "secondaryLocation": "westus", + "statusOfPrimary": "available", + "statusOfSecondary": "available", + "supportsHttpsTrafficOnly": false + } + sku: + description: + - The storage account SKU. + type: dict + returned: always + sample: { + "name": "Standard_GRS", + "tier": "Standard" + } + tags: + description: + - Resource tags. + type: dict + returned: always + sample: { 'key1': 'value1' } + type: + description: + - The resource type. 
+ type: str + returned: always + sample: "Microsoft.Storage/storageAccounts" + +''' + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.azure_rm_common_rest import GenericRestClient +from ansible.module_utils.common.dict_transformations import dict_merge + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.service_client import ServiceClient + from msrestazure.tools import resource_id, is_valid_resource_id + import json + +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMResource(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + url=dict( + type='str' + ), + provider=dict( + type='str', + ), + resource_group=dict( + type='str', + ), + resource_type=dict( + type='str', + ), + resource_name=dict( + type='str', + ), + subresource=dict( + type='list', + default=[] + ), + api_version=dict( + type='str' + ), + method=dict( + type='str', + default='PUT', + choices=["GET", "PUT", "POST", "HEAD", "PATCH", "DELETE", "MERGE"] + ), + body=dict( + type='raw' + ), + status_code=dict( + type='list', + default=[200, 201, 202] + ), + idempotency=dict( + type='bool', + default=False + ), + polling_timeout=dict( + type='int', + default=0 + ), + polling_interval=dict( + type='int', + default=60 + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + # store the results of the module operation + self.results = dict( + changed=False, + response=None + ) + self.mgmt_client = None + self.url = None + self.api_version = None + self.provider = None + self.resource_group = None + self.resource_type = None + self.resource_name = None + self.subresource_type = None + self.subresource_name = None + self.subresource = [] + self.method = None + self.status_code = [] + self.idempotency = False + self.polling_timeout = None + self.polling_interval = None + self.state = None + self.body = None + super(AzureRMResource, self).__init__(self.module_arg_spec, supports_tags=False) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.state == 'absent': + self.method = 'DELETE' + self.status_code.append(204) + + if self.url is None: + orphan = None + rargs = dict() + rargs['subscription'] = self.subscription_id + rargs['resource_group'] = self.resource_group + if not (self.provider is None or self.provider.lower().startswith('.microsoft')): + rargs['namespace'] = "Microsoft." 
+ self.provider + else: + rargs['namespace'] = self.provider + + if self.resource_type is not None and self.resource_name is not None: + rargs['type'] = self.resource_type + rargs['name'] = self.resource_name + for i in range(len(self.subresource)): + resource_ns = self.subresource[i].get('namespace', None) + resource_type = self.subresource[i].get('type', None) + resource_name = self.subresource[i].get('name', None) + if resource_type is not None and resource_name is not None: + rargs['child_namespace_' + str(i + 1)] = resource_ns + rargs['child_type_' + str(i + 1)] = resource_type + rargs['child_name_' + str(i + 1)] = resource_name + else: + orphan = resource_type + else: + orphan = self.resource_type + + self.url = resource_id(**rargs) + + if orphan is not None: + self.url += '/' + orphan + + # if api_version was not specified, get latest one + if not self.api_version: + try: + # extract provider and resource type + if "/providers/" in self.url: + provider = self.url.split("/providers/")[1].split("/")[0] + resourceType = self.url.split(provider + "/")[1].split("/")[0] + url = "/subscriptions/" + self.subscription_id + "/providers/" + provider + api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text) + for rt in api_versions['resourceTypes']: + if rt['resourceType'].lower() == resourceType.lower(): + self.api_version = rt['apiVersions'][0] + break + else: + # if there's no provider in API version, assume Microsoft.Resources + self.api_version = '2018-05-01' + if not self.api_version: + self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType)) + except Exception as exc: + self.fail("Failed to obtain API version: {0}".format(str(exc))) + + query_parameters = {} + query_parameters['api-version'] = self.api_version + + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + needs_update = True + response = None + + if self.idempotency: + original = self.mgmt_client.query(self.url, "GET", query_parameters, None, None, [200, 404], 0, 0) + + if original.status_code == 404: + if self.state == 'absent': + needs_update = False + else: + try: + response = json.loads(original.text) + needs_update = (dict_merge(response, self.body) != response) + except Exception: + pass + + if needs_update: + response = self.mgmt_client.query(self.url, + self.method, + query_parameters, + header_parameters, + self.body, + self.status_code, + self.polling_timeout, + self.polling_interval) + if self.state == 'present': + try: + response = json.loads(response.text) + except Exception: + response = response.text + else: + response = None + + self.results['response'] = response + self.results['changed'] = needs_update + + return self.results + + +def main(): + AzureRMResource() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_resource_info.py b/test/support/integration/plugins/modules/azure_rm_resource_info.py new file mode 100644 index 00000000..f797f662 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_resource_info.py @@ -0,0 +1,432 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + 
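# --- Editor's note (illustrative sketch, not part of the diff) --------------
# How the idempotency check in azure_rm_resource above works: the module GETs
# the current resource state and layers the requested body over it with
# dict_merge(); if the merge changes nothing, the PUT is skipped. The resource
# dicts below are hypothetical values, shown only to make the comparison
# concrete.
#
#     from ansible.module_utils.common.dict_transformations import dict_merge
#
#     current = {'location': 'eastus',
#                'properties': {'supportsHttpsTrafficOnly': False}}
#     desired = {'properties': {'supportsHttpsTrafficOnly': False}}
#
#     # mirrors: needs_update = (dict_merge(response, self.body) != response)
#     needs_update = (dict_merge(current, desired) != current)
#     # -> False here, so the module would report changed=False and skip the PUT
# -----------------------------------------------------------------------------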
+DOCUMENTATION = ''' +--- +module: azure_rm_resource_info +version_added: "2.9" +short_description: Generic facts of Azure resources +description: + - Obtain facts of any resource using Azure REST API. + - This module gives access to resources that are not supported via Ansible modules. + - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API. + +options: + url: + description: + - Azure RM Resource URL. + api_version: + description: + - Specific API version to be used. + provider: + description: + - Provider type, should be specified in no URL is given. + resource_group: + description: + - Resource group to be used. + - Required if URL is not specified. + resource_type: + description: + - Resource type. + resource_name: + description: + - Resource name. + subresource: + description: + - List of subresources. + suboptions: + namespace: + description: + - Subresource namespace. + type: + description: + - Subresource type. + name: + description: + - Subresource name. + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get scaleset info + azure_rm_resource_info: + resource_group: myResourceGroup + provider: compute + resource_type: virtualmachinescalesets + resource_name: myVmss + api_version: "2017-12-01" + + - name: Query all the resources in the resource group + azure_rm_resource_info: + resource_group: "{{ resource_group }}" + resource_type: resources +''' + +RETURN = ''' +response: + description: + - Response specific to resource type. + returned: always + type: complex + contains: + id: + description: + - Id of the Azure resource. + type: str + returned: always + sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/virtualMachines/myVM" + location: + description: + - Resource location. + type: str + returned: always + sample: eastus + name: + description: + - Resource name. + type: str + returned: always + sample: myVM + properties: + description: + - Specifies the virtual machine's property. + type: complex + returned: always + contains: + diagnosticsProfile: + description: + - Specifies the boot diagnostic settings state. + type: complex + returned: always + contains: + bootDiagnostics: + description: + - A debugging feature, which to view Console Output and Screenshot to diagnose VM status. + type: dict + returned: always + sample: { + "enabled": true, + "storageUri": "https://vxisurgdiag.blob.core.windows.net/" + } + hardwareProfile: + description: + - Specifies the hardware settings for the virtual machine. + type: dict + returned: always + sample: { + "vmSize": "Standard_D2s_v3" + } + networkProfile: + description: + - Specifies the network interfaces of the virtual machine. + type: complex + returned: always + contains: + networkInterfaces: + description: + - Describes a network interface reference. + type: list + returned: always + sample: + - { + "id": "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/networkInterfaces/myvm441" + } + osProfile: + description: + - Specifies the operating system settings for the virtual machine. + type: complex + returned: always + contains: + adminUsername: + description: + - Specifies the name of the administrator account. + type: str + returned: always + sample: azureuser + allowExtensionOperations: + description: + - Specifies whether extension operations should be allowed on the virtual machine. 
+ - This may only be set to False when no extensions are present on the virtual machine. + type: bool + returned: always + sample: true + computerName: + description: + - Specifies the host OS name of the virtual machine. + type: str + returned: always + sample: myVM + requireGuestProvisionSignale: + description: + - Specifies the host require guest provision signal or not. + type: bool + returned: always + sample: true + secrets: + description: + - Specifies set of certificates that should be installed onto the virtual machine. + type: list + returned: always + sample: [] + linuxConfiguration: + description: + - Specifies the Linux operating system settings on the virtual machine. + type: dict + returned: when OS type is Linux + sample: { + "disablePasswordAuthentication": false, + "provisionVMAgent": true + } + provisioningState: + description: + - The provisioning state. + type: str + returned: always + sample: Succeeded + vmID: + description: + - Specifies the VM unique ID which is a 128-bits identifier that is encoded and stored in all Azure laaS VMs SMBIOS. + - It can be read using platform BIOS commands. + type: str + returned: always + sample: "eb86d9bb-6725-4787-a487-2e497d5b340c" + storageProfile: + description: + - Specifies the storage account type for the managed disk. + type: complex + returned: always + contains: + dataDisks: + description: + - Specifies the parameters that are used to add a data disk to virtual machine. + type: list + returned: always + sample: + - { + "caching": "None", + "createOption": "Attach", + "diskSizeGB": 1023, + "lun": 2, + "managedDisk": { + "id": "/subscriptions/xxxx....xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk2", + "storageAccountType": "StandardSSD_LRS" + }, + "name": "testdisk2" + } + - { + "caching": "None", + "createOption": "Attach", + "diskSizeGB": 1023, + "lun": 1, + "managedDisk": { + "id": "/subscriptions/xxxx...xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk3", + "storageAccountType": "StandardSSD_LRS" + }, + "name": "testdisk3" + } + + imageReference: + description: + - Specifies information about the image to use. + type: dict + returned: always + sample: { + "offer": "UbuntuServer", + "publisher": "Canonical", + "sku": "18.04-LTS", + "version": "latest" + } + osDisk: + description: + - Specifies information about the operating system disk used by the virtual machine. + type: dict + returned: always + sample: { + "caching": "ReadWrite", + "createOption": "FromImage", + "diskSizeGB": 30, + "managedDisk": { + "id": "/subscriptions/xxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/disks/myVM_disk1_xxx", + "storageAccountType": "Premium_LRS" + }, + "name": "myVM_disk1_xxx", + "osType": "Linux" + } + type: + description: + - The type of identity used for the virtual machine. 
+ type: str + returned: always + sample: "Microsoft.Compute/virtualMachines" +''' + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.azure_rm_common_rest import GenericRestClient + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.service_client import ServiceClient + from msrestazure.tools import resource_id, is_valid_resource_id + import json + +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMResourceInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + url=dict( + type='str' + ), + provider=dict( + type='str' + ), + resource_group=dict( + type='str' + ), + resource_type=dict( + type='str' + ), + resource_name=dict( + type='str' + ), + subresource=dict( + type='list', + default=[] + ), + api_version=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + response=[] + ) + self.mgmt_client = None + self.url = None + self.api_version = None + self.provider = None + self.resource_group = None + self.resource_type = None + self.resource_name = None + self.subresource = [] + super(AzureRMResourceInfo, self).__init__(self.module_arg_spec, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_resource_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_resource_facts' module has been renamed to 'azure_rm_resource_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.url is None: + orphan = None + rargs = dict() + rargs['subscription'] = self.subscription_id + rargs['resource_group'] = self.resource_group + if not (self.provider is None or self.provider.lower().startswith('.microsoft')): + rargs['namespace'] = "Microsoft." 
+ self.provider + else: + rargs['namespace'] = self.provider + + if self.resource_type is not None and self.resource_name is not None: + rargs['type'] = self.resource_type + rargs['name'] = self.resource_name + for i in range(len(self.subresource)): + resource_ns = self.subresource[i].get('namespace', None) + resource_type = self.subresource[i].get('type', None) + resource_name = self.subresource[i].get('name', None) + if resource_type is not None and resource_name is not None: + rargs['child_namespace_' + str(i + 1)] = resource_ns + rargs['child_type_' + str(i + 1)] = resource_type + rargs['child_name_' + str(i + 1)] = resource_name + else: + orphan = resource_type + else: + orphan = self.resource_type + + self.url = resource_id(**rargs) + + if orphan is not None: + self.url += '/' + orphan + + # if api_version was not specified, get latest one + if not self.api_version: + try: + # extract provider and resource type + if "/providers/" in self.url: + provider = self.url.split("/providers/")[1].split("/")[0] + resourceType = self.url.split(provider + "/")[1].split("/")[0] + url = "/subscriptions/" + self.subscription_id + "/providers/" + provider + api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text) + for rt in api_versions['resourceTypes']: + if rt['resourceType'].lower() == resourceType.lower(): + self.api_version = rt['apiVersions'][0] + break + else: + # if there's no provider in API version, assume Microsoft.Resources + self.api_version = '2018-05-01' + if not self.api_version: + self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType)) + except Exception as exc: + self.fail("Failed to obtain API version: {0}".format(str(exc))) + + self.results['url'] = self.url + + query_parameters = {} + query_parameters['api-version'] = self.api_version + + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + skiptoken = None + + while True: + if skiptoken: + query_parameters['skiptoken'] = skiptoken + response = self.mgmt_client.query(self.url, "GET", query_parameters, header_parameters, None, [200, 404], 0, 0) + try: + response = json.loads(response.text) + if isinstance(response, dict): + if response.get('value'): + self.results['response'] = self.results['response'] + response['value'] + skiptoken = response.get('nextLink') + else: + self.results['response'] = self.results['response'] + [response] + except Exception as e: + self.fail('Failed to parse response: ' + str(e)) + if not skiptoken: + break + return self.results + + +def main(): + AzureRMResourceInfo() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_storageaccount.py b/test/support/integration/plugins/modules/azure_rm_storageaccount.py new file mode 100644 index 00000000..d4158bbd --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_storageaccount.py @@ -0,0 +1,684 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com> +# Chris Houseknecht, <house@redhat.com> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_storageaccount +version_added: "2.1" +short_description: Manage Azure storage accounts +description: + - 
Create, update or delete a storage account. +options: + resource_group: + description: + - Name of the resource group to use. + required: true + aliases: + - resource_group_name + name: + description: + - Name of the storage account to update or create. + state: + description: + - State of the storage account. Use C(present) to create or update a storage account and use C(absent) to delete an account. + default: present + choices: + - absent + - present + location: + description: + - Valid Azure location. Defaults to location of the resource group. + account_type: + description: + - Type of storage account. Required when creating a storage account. + - C(Standard_ZRS) and C(Premium_LRS) accounts cannot be changed to other account types. + - Other account types cannot be changed to C(Standard_ZRS) or C(Premium_LRS). + choices: + - Premium_LRS + - Standard_GRS + - Standard_LRS + - StandardSSD_LRS + - Standard_RAGRS + - Standard_ZRS + - Premium_ZRS + aliases: + - type + custom_domain: + description: + - User domain assigned to the storage account. + - Must be a dictionary with I(name) and I(use_sub_domain) keys where I(name) is the CNAME source. + - Only one custom domain is supported per storage account at this time. + - To clear the existing custom domain, use an empty string for the custom domain name property. + - Can be added to an existing storage account. Will be ignored during storage account creation. + aliases: + - custom_dns_domain_suffix + kind: + description: + - The kind of storage. + default: 'Storage' + choices: + - Storage + - StorageV2 + - BlobStorage + version_added: "2.2" + access_tier: + description: + - The access tier for this storage account. Required when I(kind=BlobStorage). + choices: + - Hot + - Cool + version_added: "2.4" + force_delete_nonempty: + description: + - Attempt deletion if resource already exists and cannot be updated. + type: bool + aliases: + - force + https_only: + description: + - Allows https traffic only to storage service when set to C(true). + type: bool + version_added: "2.8" + blob_cors: + description: + - Specifies CORS rules for the Blob service. + - You can include up to five CorsRule elements in the request. + - If no blob_cors elements are included in the argument list, nothing about CORS will be changed. + - If you want to delete all CORS rules and disable CORS for the Blob service, explicitly set I(blob_cors=[]). + type: list + version_added: "2.8" + suboptions: + allowed_origins: + description: + - A list of origin domains that will be allowed via CORS, or "*" to allow all domains. + type: list + required: true + allowed_methods: + description: + - A list of HTTP methods that are allowed to be executed by the origin. + type: list + required: true + max_age_in_seconds: + description: + - The number of seconds that the client/browser should cache a preflight response. + type: int + required: true + exposed_headers: + description: + - A list of response headers to expose to CORS clients. + type: list + required: true + allowed_headers: + description: + - A list of headers allowed to be part of the cross-origin request. 
+ type: list + required: true + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) +''' + +EXAMPLES = ''' + - name: remove account, if it exists + azure_rm_storageaccount: + resource_group: myResourceGroup + name: clh0002 + state: absent + + - name: create an account + azure_rm_storageaccount: + resource_group: myResourceGroup + name: clh0002 + type: Standard_RAGRS + tags: + testing: testing + delete: on-exit + + - name: create an account with blob CORS + azure_rm_storageaccount: + resource_group: myResourceGroup + name: clh002 + type: Standard_RAGRS + blob_cors: + - allowed_origins: + - http://www.example.com/ + allowed_methods: + - GET + - POST + allowed_headers: + - x-ms-meta-data* + - x-ms-meta-target* + - x-ms-meta-abc + exposed_headers: + - x-ms-meta-* + max_age_in_seconds: 200 +''' + + +RETURN = ''' +state: + description: + - Current state of the storage account. + returned: always + type: complex + contains: + account_type: + description: + - Type of storage account. + returned: always + type: str + sample: Standard_RAGRS + custom_domain: + description: + - User domain assigned to the storage account. + returned: always + type: complex + contains: + name: + description: + - CNAME source. + returned: always + type: str + sample: testaccount + use_sub_domain: + description: + - Whether to use sub domain. + returned: always + type: bool + sample: true + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/clh0003" + location: + description: + - Valid Azure location. Defaults to location of the resource group. + returned: always + type: str + sample: eastus2 + name: + description: + - Name of the storage account to update or create. + returned: always + type: str + sample: clh0003 + primary_endpoints: + description: + - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the primary location. + returned: always + type: dict + sample: { + "blob": "https://clh0003.blob.core.windows.net/", + "queue": "https://clh0003.queue.core.windows.net/", + "table": "https://clh0003.table.core.windows.net/" + } + primary_location: + description: + - The location of the primary data center for the storage account. + returned: always + type: str + sample: eastus2 + provisioning_state: + description: + - The status of the storage account. + - Possible values include C(Creating), C(ResolvingDNS), C(Succeeded). + returned: always + type: str + sample: Succeeded + resource_group: + description: + - The resource group's name. + returned: always + type: str + sample: Testing + secondary_endpoints: + description: + - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the secondary location. + returned: always + type: dict + sample: { + "blob": "https://clh0003-secondary.blob.core.windows.net/", + "queue": "https://clh0003-secondary.queue.core.windows.net/", + "table": "https://clh0003-secondary.table.core.windows.net/" + } + secondary_location: + description: + - The location of the geo-replicated secondary for the storage account. + returned: always + type: str + sample: centralus + status_of_primary: + description: + - The status of the primary location of the storage account; either C(available) or C(unavailable). 
+ returned: always + type: str + sample: available + status_of_secondary: + description: + - The status of the secondary location of the storage account; either C(available) or C(unavailable). + returned: always + type: str + sample: available + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { 'tags1': 'value1' } + type: + description: + - The storage account type. + returned: always + type: str + sample: "Microsoft.Storage/storageAccounts" +''' + +try: + from msrestazure.azure_exceptions import CloudError + from azure.storage.cloudstorageaccount import CloudStorageAccount + from azure.common import AzureMissingResourceHttpError +except ImportError: + # This is handled in azure_rm_common + pass + +import copy +from ansible.module_utils.azure_rm_common import AZURE_SUCCESS_STATE, AzureRMModuleBase +from ansible.module_utils._text import to_native + +cors_rule_spec = dict( + allowed_origins=dict(type='list', elements='str', required=True), + allowed_methods=dict(type='list', elements='str', required=True), + max_age_in_seconds=dict(type='int', required=True), + exposed_headers=dict(type='list', elements='str', required=True), + allowed_headers=dict(type='list', elements='str', required=True), +) + + +def compare_cors(cors1, cors2): + if len(cors1) != len(cors2): + return False + copy2 = copy.copy(cors2) + for rule1 in cors1: + matched = False + for rule2 in copy2: + if (rule1['max_age_in_seconds'] == rule2['max_age_in_seconds'] + and set(rule1['allowed_methods']) == set(rule2['allowed_methods']) + and set(rule1['allowed_origins']) == set(rule2['allowed_origins']) + and set(rule1['allowed_headers']) == set(rule2['allowed_headers']) + and set(rule1['exposed_headers']) == set(rule2['exposed_headers'])): + matched = True + copy2.remove(rule2) + if not matched: + return False + return True + + +class AzureRMStorageAccount(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + account_type=dict(type='str', + choices=['Premium_LRS', 'Standard_GRS', 'Standard_LRS', 'StandardSSD_LRS', 'Standard_RAGRS', 'Standard_ZRS', 'Premium_ZRS'], + aliases=['type']), + custom_domain=dict(type='dict', aliases=['custom_dns_domain_suffix']), + location=dict(type='str'), + name=dict(type='str', required=True), + resource_group=dict(required=True, type='str', aliases=['resource_group_name']), + state=dict(default='present', choices=['present', 'absent']), + force_delete_nonempty=dict(type='bool', default=False, aliases=['force']), + tags=dict(type='dict'), + kind=dict(type='str', default='Storage', choices=['Storage', 'StorageV2', 'BlobStorage']), + access_tier=dict(type='str', choices=['Hot', 'Cool']), + https_only=dict(type='bool', default=False), + blob_cors=dict(type='list', options=cors_rule_spec, elements='dict') + ) + + self.results = dict( + changed=False, + state=dict() + ) + + self.account_dict = None + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.account_type = None + self.custom_domain = None + self.tags = None + self.force_delete_nonempty = None + self.kind = None + self.access_tier = None + self.https_only = None + self.blob_cors = None + + super(AzureRMStorageAccount, self).__init__(self.module_arg_spec, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = 
resource_group.location + + if len(self.name) < 3 or len(self.name) > 24: + self.fail("Parameter error: name length must be between 3 and 24 characters.") + + if self.custom_domain: + if self.custom_domain.get('name', None) is None: + self.fail("Parameter error: expecting custom_domain to have a name attribute of type string.") + if self.custom_domain.get('use_sub_domain', None) is None: + self.fail("Parameter error: expecting custom_domain to have a use_sub_domain " + "attribute of type boolean.") + + self.account_dict = self.get_account() + + if self.state == 'present' and self.account_dict and \ + self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE: + self.fail("Error: storage account {0} has not completed provisioning. State is {1}. Expecting state " + "to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE)) + + if self.account_dict is not None: + self.results['state'] = self.account_dict + else: + self.results['state'] = dict() + + if self.state == 'present': + if not self.account_dict: + self.results['state'] = self.create_account() + else: + self.update_account() + elif self.state == 'absent' and self.account_dict: + self.delete_account() + self.results['state'] = dict(Status='Deleted') + + return self.results + + def check_name_availability(self): + self.log('Checking name availability for {0}'.format(self.name)) + try: + response = self.storage_client.storage_accounts.check_name_availability(self.name) + except CloudError as e: + self.log('Error attempting to validate name.') + self.fail("Error checking name availability: {0}".format(str(e))) + if not response.name_available: + self.log('Error name not available.') + self.fail("{0} - {1}".format(response.message, response.reason)) + + def get_account(self): + self.log('Get properties for account {0}'.format(self.name)) + account_obj = None + blob_service_props = None + account_dict = None + + try: + account_obj = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name) + blob_service_props = self.storage_client.blob_services.get_service_properties(self.resource_group, self.name) + except CloudError: + pass + + if account_obj: + account_dict = self.account_obj_to_dict(account_obj, blob_service_props) + + return account_dict + + def account_obj_to_dict(self, account_obj, blob_service_props=None): + account_dict = dict( + id=account_obj.id, + name=account_obj.name, + location=account_obj.location, + resource_group=self.resource_group, + type=account_obj.type, + access_tier=(account_obj.access_tier.value + if account_obj.access_tier is not None else None), + sku_tier=account_obj.sku.tier.value, + sku_name=account_obj.sku.name.value, + provisioning_state=account_obj.provisioning_state.value, + secondary_location=account_obj.secondary_location, + status_of_primary=(account_obj.status_of_primary.value + if account_obj.status_of_primary is not None else None), + status_of_secondary=(account_obj.status_of_secondary.value + if account_obj.status_of_secondary is not None else None), + primary_location=account_obj.primary_location, + https_only=account_obj.enable_https_traffic_only + ) + account_dict['custom_domain'] = None + if account_obj.custom_domain: + account_dict['custom_domain'] = dict( + name=account_obj.custom_domain.name, + use_sub_domain=account_obj.custom_domain.use_sub_domain + ) + + account_dict['primary_endpoints'] = None + if account_obj.primary_endpoints: + account_dict['primary_endpoints'] = dict( + blob=account_obj.primary_endpoints.blob, + 
queue=account_obj.primary_endpoints.queue, + table=account_obj.primary_endpoints.table + ) + account_dict['secondary_endpoints'] = None + if account_obj.secondary_endpoints: + account_dict['secondary_endpoints'] = dict( + blob=account_obj.secondary_endpoints.blob, + queue=account_obj.secondary_endpoints.queue, + table=account_obj.secondary_endpoints.table + ) + account_dict['tags'] = None + if account_obj.tags: + account_dict['tags'] = account_obj.tags + if blob_service_props and blob_service_props.cors and blob_service_props.cors.cors_rules: + account_dict['blob_cors'] = [dict( + allowed_origins=[to_native(y) for y in x.allowed_origins], + allowed_methods=[to_native(y) for y in x.allowed_methods], + max_age_in_seconds=x.max_age_in_seconds, + exposed_headers=[to_native(y) for y in x.exposed_headers], + allowed_headers=[to_native(y) for y in x.allowed_headers] + ) for x in blob_service_props.cors.cors_rules] + return account_dict + + def update_account(self): + self.log('Update storage account {0}'.format(self.name)) + if bool(self.https_only) != bool(self.account_dict.get('https_only')): + self.results['changed'] = True + self.account_dict['https_only'] = self.https_only + if not self.check_mode: + try: + parameters = self.storage_models.StorageAccountUpdateParameters(enable_https_traffic_only=self.https_only) + self.storage_client.storage_accounts.update(self.resource_group, + self.name, + parameters) + except Exception as exc: + self.fail("Failed to update account type: {0}".format(str(exc))) + + if self.account_type: + if self.account_type != self.account_dict['sku_name']: + # change the account type + SkuName = self.storage_models.SkuName + if self.account_dict['sku_name'] in [SkuName.premium_lrs, SkuName.standard_zrs]: + self.fail("Storage accounts of type {0} and {1} cannot be changed.".format( + SkuName.premium_lrs, SkuName.standard_zrs)) + if self.account_type in [SkuName.premium_lrs, SkuName.standard_zrs]: + self.fail("Storage account of type {0} cannot be changed to a type of {1} or {2}.".format( + self.account_dict['sku_name'], SkuName.premium_lrs, SkuName.standard_zrs)) + + self.results['changed'] = True + self.account_dict['sku_name'] = self.account_type + + if self.results['changed'] and not self.check_mode: + # Perform the update. The API only allows changing one attribute per call. 
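#                 (Editor's note: because of the one-attribute-per-call
#                 restriction noted above, update_account() builds a separate
#                 StorageAccountUpdateParameters object and issues a separate
#                 storage_accounts.update() call for each changed property --
#                 the SKU here, then custom_domain, access_tier, and tags in
#                 the blocks that follow.)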
+ try: + self.log("sku_name: %s" % self.account_dict['sku_name']) + self.log("sku_tier: %s" % self.account_dict['sku_tier']) + sku = self.storage_models.Sku(name=SkuName(self.account_dict['sku_name'])) + sku.tier = self.storage_models.SkuTier(self.account_dict['sku_tier']) + parameters = self.storage_models.StorageAccountUpdateParameters(sku=sku) + self.storage_client.storage_accounts.update(self.resource_group, + self.name, + parameters) + except Exception as exc: + self.fail("Failed to update account type: {0}".format(str(exc))) + + if self.custom_domain: + if not self.account_dict['custom_domain'] or self.account_dict['custom_domain'] != self.custom_domain: + self.results['changed'] = True + self.account_dict['custom_domain'] = self.custom_domain + + if self.results['changed'] and not self.check_mode: + new_domain = self.storage_models.CustomDomain(name=self.custom_domain['name'], + use_sub_domain=self.custom_domain['use_sub_domain']) + parameters = self.storage_models.StorageAccountUpdateParameters(custom_domain=new_domain) + try: + self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters) + except Exception as exc: + self.fail("Failed to update custom domain: {0}".format(str(exc))) + + if self.access_tier: + if not self.account_dict['access_tier'] or self.account_dict['access_tier'] != self.access_tier: + self.results['changed'] = True + self.account_dict['access_tier'] = self.access_tier + + if self.results['changed'] and not self.check_mode: + parameters = self.storage_models.StorageAccountUpdateParameters(access_tier=self.access_tier) + try: + self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters) + except Exception as exc: + self.fail("Failed to update access tier: {0}".format(str(exc))) + + update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags']) + if update_tags: + self.results['changed'] = True + if not self.check_mode: + parameters = self.storage_models.StorageAccountUpdateParameters(tags=self.account_dict['tags']) + try: + self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters) + except Exception as exc: + self.fail("Failed to update tags: {0}".format(str(exc))) + + if self.blob_cors and not compare_cors(self.account_dict.get('blob_cors', []), self.blob_cors): + self.results['changed'] = True + if not self.check_mode: + self.set_blob_cors() + + def create_account(self): + self.log("Creating account {0}".format(self.name)) + + if not self.location: + self.fail('Parameter error: location required when creating a storage account.') + + if not self.account_type: + self.fail('Parameter error: account_type required when creating a storage account.') + + if not self.access_tier and self.kind == 'BlobStorage': + self.fail('Parameter error: access_tier required when creating a storage account of type BlobStorage.') + + self.check_name_availability() + self.results['changed'] = True + + if self.check_mode: + account_dict = dict( + location=self.location, + account_type=self.account_type, + name=self.name, + resource_group=self.resource_group, + enable_https_traffic_only=self.https_only, + tags=dict() + ) + if self.tags: + account_dict['tags'] = self.tags + if self.blob_cors: + account_dict['blob_cors'] = self.blob_cors + return account_dict + sku = self.storage_models.Sku(name=self.storage_models.SkuName(self.account_type)) + sku.tier = self.storage_models.SkuTier.standard if 'Standard' in self.account_type else \ + self.storage_models.SkuTier.premium + parameters 
= self.storage_models.StorageAccountCreateParameters(sku=sku, + kind=self.kind, + location=self.location, + tags=self.tags, + access_tier=self.access_tier) + self.log(str(parameters)) + try: + poller = self.storage_client.storage_accounts.create(self.resource_group, self.name, parameters) + self.get_poller_result(poller) + except CloudError as e: + self.log('Error creating storage account.') + self.fail("Failed to create account: {0}".format(str(e))) + if self.blob_cors: + self.set_blob_cors() + # the poller doesn't actually return anything + return self.get_account() + + def delete_account(self): + if self.account_dict['provisioning_state'] == self.storage_models.ProvisioningState.succeeded.value and \ + not self.force_delete_nonempty and self.account_has_blob_containers(): + self.fail("Account contains blob containers. Is it in use? Use the force_delete_nonempty option to attempt deletion.") + + self.log('Delete storage account {0}'.format(self.name)) + self.results['changed'] = True + if not self.check_mode: + try: + status = self.storage_client.storage_accounts.delete(self.resource_group, self.name) + self.log("delete status: ") + self.log(str(status)) + except CloudError as e: + self.fail("Failed to delete the account: {0}".format(str(e))) + return True + + def account_has_blob_containers(self): + ''' + If there are blob containers, then there are likely VMs depending on this account and it should + not be deleted. + ''' + self.log('Checking for existing blob containers') + blob_service = self.get_blob_client(self.resource_group, self.name) + try: + response = blob_service.list_containers() + except AzureMissingResourceHttpError: + # No blob storage available? + return False + + if len(response.items) > 0: + return True + return False + + def set_blob_cors(self): + try: + cors_rules = self.storage_models.CorsRules(cors_rules=[self.storage_models.CorsRule(**x) for x in self.blob_cors]) + self.storage_client.blob_services.set_service_properties(self.resource_group, + self.name, + self.storage_models.BlobServiceProperties(cors=cors_rules)) + except Exception as exc: + self.fail("Failed to set CORS rules: {0}".format(str(exc))) + + +def main(): + AzureRMStorageAccount() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_webapp.py b/test/support/integration/plugins/modules/azure_rm_webapp.py new file mode 100644 index 00000000..4f185f45 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_webapp.py @@ -0,0 +1,1070 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_webapp +version_added: "2.7" +short_description: Manage Web App instances +description: + - Create, update and delete instance of Web App. + +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: True + name: + description: + - Unique name of the app to create or update. To create or update a deployment slot, use the {slot} parameter. + required: True + + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + + plan: + description: + - App service plan. 
Required for creation.
+        - Can be the name of an existing app service plan in the same resource group as the web app.
+        - Can be the resource ID of an existing app service plan. For example
+          /subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>.
+        - Can be a dict containing five parameters, defined below.
+        - C(name), name of app service plan.
+        - C(resource_group), resource group of the app service plan.
+        - C(sku), SKU of app service plan, allowed values listed on U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/).
+        - C(is_linux), whether or not the app service plan is Linux. Defaults to C(False).
+        - C(number_of_workers), number of workers for app service plan.
+
+    frameworks:
+        description:
+            - Set of run time framework settings. Each setting is a dictionary.
+            - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
+        suboptions:
+            name:
+                description:
+                    - Name of the framework.
+                    - Supported framework list for Windows web app and Linux web app is different.
+                    - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
+                    - Windows web apps support multiple frameworks at the same time.
+                    - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
+                    - Linux web apps support only one framework.
+                    - Java framework is mutually exclusive with others.
+                choices:
+                    - java
+                    - net_framework
+                    - php
+                    - python
+                    - ruby
+                    - dotnetcore
+                    - node
+            version:
+                description:
+                    - Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info.
+                    - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
+                    - C(php) supported value sample, C(5.5), C(5.6), C(7.0).
+                    - C(python) supported value sample, C(5.5), C(5.6), C(7.0).
+                    - C(node) supported value sample, C(6.6), C(6.9).
+                    - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
+                    - C(ruby) supported value sample, C(2.3).
+                    - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
+            settings:
+                description:
+                    - List of settings of the framework.
+                suboptions:
+                    java_container:
+                        description:
+                            - Name of Java container.
+                            - Supported only when I(frameworks=java). Sample values C(Tomcat), C(Jetty).
+                    java_container_version:
+                        description:
+                            - Version of Java container.
+                            - Supported only when I(frameworks=java).
+                            - Sample values for C(Tomcat), C(8.0), C(8.5), C(9.0). For C(Jetty), C(9.1), C(9.3).
+
+    container_settings:
+        description:
+            - Web app container settings.
+        suboptions:
+            name:
+                description:
+                    - Name of container, for example C(imagename:tag).
+            registry_server_url:
+                description:
+                    - Container registry server URL, for example C(mydockerregistry.io).
+            registry_server_user:
+                description:
+                    - The container registry server user name.
+            registry_server_password:
+                description:
+                    - The container registry server password.
+
+    scm_type:
+        description:
+            - Repository type of deployment source, for example C(LocalGit), C(GitHub).
+            - List of supported values maintained at U(https://docs.microsoft.com/en-us/rest/api/appservice/webapps/createorupdate#scmtype).
+
+    deployment_source:
+        description:
+            - Deployment source for git.
+        suboptions:
+            url:
+                description:
+                    - Repository URL of deployment source.
+
+            branch:
+                description:
+                    - The branch name of the repository.
+    startup_file:
+        description:
+            - The web's startup file.
+            - Used only for Linux web apps.
+
+    client_affinity_enabled:
+        description:
+            - Whether or not to send session affinity cookies, which route client requests in the same session to the same instance.
+        type: bool
+        default: True
+
+    https_only:
+        description:
+            - Configures web site to accept only HTTPS requests.
+        type: bool
+
+    dns_registration:
+        description:
+            - Whether or not the web app hostname is registered with DNS on creation. Set to C(false) to register.
+        type: bool
+
+    skip_custom_domain_verification:
+        description:
+            - Whether or not to skip verification of custom (non *.azurewebsites.net) domains associated with web app. Set to C(true) to skip.
+        type: bool
+
+    ttl_in_seconds:
+        description:
+            - Time to live in seconds for web app default domain name.
+
+    app_settings:
+        description:
+            - Configure web app application settings. Suboptions are in key value pair format.
+
+    purge_app_settings:
+        description:
+            - Purge any existing application settings. Replace web app application settings with app_settings.
+        type: bool
+
+    app_state:
+        description:
+            - Start/Stop/Restart the web app.
+        type: str
+        choices:
+            - started
+            - stopped
+            - restarted
+        default: started
+
+    state:
+        description:
+            - State of the Web App.
+            - Use C(present) to create or update a Web App and C(absent) to delete it.
+        default: present
+        choices:
+            - absent
+            - present
+
+extends_documentation_fragment:
+    - azure
+    - azure_tags
+
+author:
+    - Yunge Zhu (@yungezz)
+
+'''
+
+EXAMPLES = '''
+    - name: Create a windows web app with a non-existent app service plan
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myWinWebapp
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+          is_linux: false
+          sku: S1
+
+    - name: Create a docker web app with some app settings, with docker image
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myDockerWebapp
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+          is_linux: true
+          sku: S1
+          number_of_workers: 2
+        app_settings:
+          testkey: testvalue
+          testkey2: testvalue2
+        container_settings:
+          name: ansible/ansible:ubuntu1404
+
+    - name: Create a docker web app with private ACR registry
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myDockerWebapp
+        plan: myAppServicePlan
+        app_settings:
+          testkey: testvalue
+        container_settings:
+          name: ansible/ubuntu1404
+          registry_server_url: myregistry.io
+          registry_server_user: user
+          registry_server_password: pass
+
+    - name: Create a linux web app with Node 6.6 framework
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myLinuxWebapp
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+        app_settings:
+          testkey: testvalue
+        frameworks:
+          - name: "node"
+            version: "6.6"
+
+    - name: Create a windows web app with node, php
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myWinWebapp
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+        app_settings:
+          testkey: testvalue
+        frameworks:
+          - name: "node"
+            version: "6.6"
+          - name: "php"
+            version: "7.0"
+
+    - name: Create a stage deployment slot for an existing web app
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myWebapp/slots/stage
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+        app_settings:
+          testkey: testvalue
+
+    - name: Create a linux web app with java framework
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myLinuxWebapp
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+        app_settings:
+          testkey: testvalue
+        frameworks:
- name: "java" + version: "8" + settings: + java_container: "Tomcat" + java_container_version: "8.5" +''' + +RETURN = ''' +azure_webapp: + description: + - ID of current web app. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp" +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from msrest.serialization import Model + from azure.mgmt.web.models import ( + site_config, app_service_plan, Site, + AppServicePlan, SkuDescription, NameValuePair + ) +except ImportError: + # This is handled in azure_rm_common + pass + +container_settings_spec = dict( + name=dict(type='str', required=True), + registry_server_url=dict(type='str'), + registry_server_user=dict(type='str'), + registry_server_password=dict(type='str', no_log=True) +) + +deployment_source_spec = dict( + url=dict(type='str'), + branch=dict(type='str') +) + + +framework_settings_spec = dict( + java_container=dict(type='str', required=True), + java_container_version=dict(type='str', required=True) +) + + +framework_spec = dict( + name=dict( + type='str', + required=True, + choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']), + version=dict(type='str', required=True), + settings=dict(type='dict', options=framework_settings_spec) +) + + +def _normalize_sku(sku): + if sku is None: + return sku + + sku = sku.upper() + if sku == 'FREE': + return 'F1' + elif sku == 'SHARED': + return 'D1' + return sku + + +def get_sku_name(tier): + tier = tier.upper() + if tier == 'F1' or tier == "FREE": + return 'FREE' + elif tier == 'D1' or tier == "SHARED": + return 'SHARED' + elif tier in ['B1', 'B2', 'B3', 'BASIC']: + return 'BASIC' + elif tier in ['S1', 'S2', 'S3']: + return 'STANDARD' + elif tier in ['P1', 'P2', 'P3']: + return 'PREMIUM' + elif tier in ['P1V2', 'P2V2', 'P3V2']: + return 'PREMIUMV2' + else: + return None + + +def appserviceplan_to_dict(plan): + return dict( + id=plan.id, + name=plan.name, + kind=plan.kind, + location=plan.location, + reserved=plan.reserved, + is_linux=plan.reserved, + provisioning_state=plan.provisioning_state, + tags=plan.tags if plan.tags else None + ) + + +def webapp_to_dict(webapp): + return dict( + id=webapp.id, + name=webapp.name, + location=webapp.location, + client_cert_enabled=webapp.client_cert_enabled, + enabled=webapp.enabled, + reserved=webapp.reserved, + client_affinity_enabled=webapp.client_affinity_enabled, + server_farm_id=webapp.server_farm_id, + host_names_disabled=webapp.host_names_disabled, + https_only=webapp.https_only if hasattr(webapp, 'https_only') else None, + skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None, + ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None, + state=webapp.state, + tags=webapp.tags if webapp.tags else None + ) + + +class Actions: + CreateOrUpdate, UpdateAppSettings, Delete = range(3) + + +class AzureRMWebApps(AzureRMModuleBase): + """Configuration class for an Azure RM Web App resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + plan=dict( + type='raw' + ), + frameworks=dict( + type='list', + elements='dict', + 
options=framework_spec + ), + container_settings=dict( + type='dict', + options=container_settings_spec + ), + scm_type=dict( + type='str', + ), + deployment_source=dict( + type='dict', + options=deployment_source_spec + ), + startup_file=dict( + type='str' + ), + client_affinity_enabled=dict( + type='bool', + default=True + ), + dns_registration=dict( + type='bool' + ), + https_only=dict( + type='bool' + ), + skip_custom_domain_verification=dict( + type='bool' + ), + ttl_in_seconds=dict( + type='int' + ), + app_settings=dict( + type='dict' + ), + purge_app_settings=dict( + type='bool', + default=False + ), + app_state=dict( + type='str', + choices=['started', 'stopped', 'restarted'], + default='started' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + mutually_exclusive = [['container_settings', 'frameworks']] + + self.resource_group = None + self.name = None + self.location = None + + # update in create_or_update as parameters + self.client_affinity_enabled = True + self.dns_registration = None + self.skip_custom_domain_verification = None + self.ttl_in_seconds = None + self.https_only = None + + self.tags = None + + # site config, e.g app settings, ssl + self.site_config = dict() + self.app_settings = dict() + self.app_settings_strDic = None + + # app service plan + self.plan = None + + # siteSourceControl + self.deployment_source = dict() + + # site, used at level creation, or update. e.g windows/linux, client_affinity etc first level args + self.site = None + + # property for internal usage, not used for sdk + self.container_settings = None + + self.purge_app_settings = False + self.app_state = 'started' + + self.results = dict( + changed=False, + id=None, + ) + self.state = None + self.to_do = [] + + self.frameworks = None + + # set site_config value from kwargs + self.site_config_updatable_properties = ["net_framework_version", + "java_version", + "php_version", + "python_version", + "scm_type"] + + # updatable_properties + self.updatable_properties = ["client_affinity_enabled", + "force_dns_registration", + "https_only", + "skip_custom_domain_verification", + "ttl_in_seconds"] + + self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java'] + self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java'] + + super(AzureRMWebApps, self).__init__(derived_arg_spec=self.module_arg_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "scm_type": + self.site_config[key] = kwargs[key] + + old_response = None + response = None + to_be_updated = False + + # set location + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + # get existing web app + old_response = self.get_webapp() + + if old_response: + self.results['id'] = old_response['id'] + + if self.state == 'present': + if not self.plan and not old_response: + self.fail("Please specify plan for newly created web app.") + + if not self.plan: + self.plan = old_response['server_farm_id'] + + self.plan = self.parse_resource_to_dict(self.plan) + + # get app service plan + is_linux = False + old_plan = self.get_app_service_plan() + if old_plan: + is_linux = old_plan['reserved'] + else: + is_linux 
= self.plan['is_linux'] if 'is_linux' in self.plan else False + + if self.frameworks: + # java is mutually exclusive with other frameworks + if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks): + self.fail('Java is mutually exclusive with other frameworks.') + + if is_linux: + if len(self.frameworks) != 1: + self.fail('Can specify one framework only for Linux web app.') + + if self.frameworks[0]['name'] not in self.supported_linux_frameworks: + self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name'])) + + self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper() + + if self.frameworks[0]['name'] == 'java': + if self.frameworks[0]['version'] != '8': + self.fail("Linux web app only supports java 8.") + if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() != 'tomcat': + self.fail("Linux web app only supports tomcat container.") + + if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() == 'tomcat': + self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8' + else: + self.site_config['linux_fx_version'] = 'JAVA|8-jre8' + else: + for fx in self.frameworks: + if fx.get('name') not in self.supported_windows_frameworks: + self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name'))) + else: + self.site_config[fx.get('name') + '_version'] = fx.get('version') + + if 'settings' in fx and fx['settings'] is not None: + for key, value in fx['settings'].items(): + self.site_config[key] = value + + if not self.app_settings: + self.app_settings = dict() + + if self.container_settings: + linux_fx_version = 'DOCKER|' + + if self.container_settings.get('registry_server_url'): + self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url'] + + linux_fx_version += self.container_settings['registry_server_url'] + '/' + + linux_fx_version += self.container_settings['name'] + + self.site_config['linux_fx_version'] = linux_fx_version + + if self.container_settings.get('registry_server_user'): + self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user'] + + if self.container_settings.get('registry_server_password'): + self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password'] + + # init site + self.site = Site(location=self.location, site_config=self.site_config) + + if self.https_only is not None: + self.site.https_only = self.https_only + + if self.client_affinity_enabled: + self.site.client_affinity_enabled = self.client_affinity_enabled + + # check if the web app already present in the resource group + if not old_response: + self.log("Web App instance doesn't exist") + + to_be_updated = True + self.to_do.append(Actions.CreateOrUpdate) + self.site.tags = self.tags + + # service plan is required for creation + if not self.plan: + self.fail("Please specify app service plan in plan parameter.") + + if not old_plan: + # no existing service plan, create one + if (not self.plan.get('name') or not self.plan.get('sku')): + self.fail('Please specify name, is_linux, sku in plan') + + if 'location' not in self.plan: + plan_resource_group = self.get_resource_group(self.plan['resource_group']) + self.plan['location'] = plan_resource_group.location + + old_plan = self.create_app_service_plan() + + 
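A note on the SKU handling in the plan-creation branch above: the user-supplied SKU string is pushed through the module's own _normalize_sku and get_sku_name helpers before the SDK models are built. A minimal standalone sketch of that flow, using an illustrative plan dict (SkuDescription and AppServicePlan are the azure.mgmt.web.models classes this module already imports; the values are made up):

    from azure.mgmt.web.models import AppServicePlan, SkuDescription

    plan = {'name': 'myAppServicePlan', 'location': 'eastus',
            'sku': 'shared', 'is_linux': True, 'number_of_workers': 2}

    sku = _normalize_sku(plan['sku'])   # 'shared' -> 'D1' (size code)
    tier = get_sku_name(sku)            # 'D1' -> 'SHARED' (pricing tier)

    sku_def = SkuDescription(tier=tier, name=sku,
                             capacity=plan.get('number_of_workers'))
    # reserved=True is how the web SDK marks a Linux plan
    plan_def = AppServicePlan(location=plan['location'],
                              app_service_plan_name=plan['name'],
                              sku=sku_def,
                              reserved=plan.get('is_linux'))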
self.site.server_farm_id = old_plan['id']
+
+                # if linux, setup startup_file
+                if old_plan['is_linux']:
+                    if hasattr(self, 'startup_file'):
+                        self.site_config['app_command_line'] = self.startup_file
+
+                # set app setting
+                if self.app_settings:
+                    app_settings = []
+                    for key in self.app_settings.keys():
+                        app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))
+
+                    self.site_config['app_settings'] = app_settings
+            else:
+                # existing web app, do update
+                self.log("Web App instance already exists")
+
+                self.log('Result: {0}'.format(old_response))
+
+                update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))
+
+                if update_tags:
+                    to_be_updated = True
+
+                # check if root level property changed
+                if self.is_updatable_property_changed(old_response):
+                    to_be_updated = True
+                    self.to_do.append(Actions.CreateOrUpdate)
+
+                # check if site_config changed
+                old_config = self.get_webapp_configuration()
+
+                if self.is_site_config_changed(old_config):
+                    to_be_updated = True
+                    self.to_do.append(Actions.CreateOrUpdate)
+
+                # check if linux_fx_version changed
+                if old_config.linux_fx_version != self.site_config.get('linux_fx_version', ''):
+                    to_be_updated = True
+                    self.to_do.append(Actions.CreateOrUpdate)
+
+                self.app_settings_strDic = self.list_app_settings()
+
+                # purge existing app_settings:
+                if self.purge_app_settings:
+                    to_be_updated = True
+                    self.app_settings_strDic = dict()
+                    self.to_do.append(Actions.UpdateAppSettings)
+
+                # check if app settings changed
+                if self.purge_app_settings or self.is_app_settings_changed():
+                    to_be_updated = True
+                    self.to_do.append(Actions.UpdateAppSettings)
+
+                    if self.app_settings:
+                        for key in self.app_settings.keys():
+                            self.app_settings_strDic[key] = self.app_settings[key]
+
+        elif self.state == 'absent':
+            if old_response:
+                self.log("Delete Web App instance")
+                self.results['changed'] = True
+
+                if self.check_mode:
+                    return self.results
+
+                self.delete_webapp()
+
+                self.log('Web App instance deleted')
+
+            else:
+                self.fail("Web app {0} does not exist.".format(self.name))
+
+        if to_be_updated:
+            self.log('Need to Create/Update web app')
+            self.results['changed'] = True
+
+            if self.check_mode:
+                return self.results
+
+            if Actions.CreateOrUpdate in self.to_do:
+                response = self.create_update_webapp()
+
+                self.results['id'] = response['id']
+
+            if Actions.UpdateAppSettings in self.to_do:
+                update_response = self.update_app_settings()
+                self.results['id'] = update_response.id
+
+        webapp = None
+        if old_response:
+            webapp = old_response
+        if response:
+            webapp = response
+
+        if webapp:
+            if (webapp['state'] != 'Stopped' and self.app_state == 'stopped') or \
+               (webapp['state'] != 'Running' and self.app_state == 'started') or \
+               self.app_state == 'restarted':
+
+                self.results['changed'] = True
+                if self.check_mode:
+                    return self.results
+
+                self.set_webapp_state(self.app_state)
+
+        return self.results
+
+    # compare existing web app with input, determine whether it's an update operation
+    def is_updatable_property_changed(self, existing_webapp):
+        for property_name in self.updatable_properties:
+            if hasattr(self, property_name) and getattr(self, property_name) is not None and \
+                    getattr(self, property_name) != existing_webapp.get(property_name, None):
+                return True
+
+        return False
+
+    # compare xxx_version
+    def is_site_config_changed(self, existing_config):
+        for fx_version in self.site_config_updatable_properties:
+            if self.site_config.get(fx_version):
+                if not getattr(existing_config, fx_version) or \
+                        getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper():
+                    return True
+
+        return False
+
+    # comparing existing app setting with input, determine whether it's changed
+    def is_app_settings_changed(self):
+        if self.app_settings:
+            if self.app_settings_strDic:
+                for key in self.app_settings.keys():
+                    if self.app_settings[key] != self.app_settings_strDic.get(key, None):
+                        return True
+            else:
+                return True
+        return False
+
+    # comparing deployment source with input, determine whether it's changed
+    def is_deployment_source_changed(self, existing_webapp):
+        if self.deployment_source:
+            if self.deployment_source.get('url') \
+                    and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']:
+                return True
+
+            if self.deployment_source.get('branch') \
+                    and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']:
+                return True
+
+        return False
+
+    def create_update_webapp(self):
+        '''
+        Creates or updates Web App with the specified configuration.
+
+        :return: deserialized Web App instance state dictionary
+        '''
+        self.log(
+            "Creating / Updating the Web App instance {0}".format(self.name))
+
+        try:
+            skip_dns_registration = self.dns_registration
+            force_dns_registration = None if self.dns_registration is None else not self.dns_registration
+
+            response = self.web_client.web_apps.create_or_update(resource_group_name=self.resource_group,
+                                                                 name=self.name,
+                                                                 site_envelope=self.site,
+                                                                 skip_dns_registration=skip_dns_registration,
+                                                                 skip_custom_domain_verification=self.skip_custom_domain_verification,
+                                                                 force_dns_registration=force_dns_registration,
+                                                                 ttl_in_seconds=self.ttl_in_seconds)
+            if isinstance(response, LROPoller):
+                response = self.get_poller_result(response)
+
+        except CloudError as exc:
+            self.log('Error attempting to create the Web App instance.')
+            self.fail(
+                "Error creating the Web App instance: {0}".format(str(exc)))
+        return webapp_to_dict(response)
+
+    def delete_webapp(self):
+        '''
+        Deletes specified Web App instance in the specified subscription and resource group.
+
+        :return: True
+        '''
+        self.log("Deleting the Web App instance {0}".format(self.name))
+        try:
+            response = self.web_client.web_apps.delete(resource_group_name=self.resource_group,
+                                                       name=self.name)
+        except CloudError as e:
+            self.log('Error attempting to delete the Web App instance.')
+            self.fail(
+                "Error deleting the Web App instance: {0}".format(str(e)))
+
+        return True
+
+    def get_webapp(self):
+        '''
+        Gets the properties of the specified Web App.
+ + :return: deserialized Web App instance state dictionary + ''' + self.log( + "Checking if the Web App instance {0} is present".format(self.name)) + + response = None + + try: + response = self.web_client.web_apps.get(resource_group_name=self.resource_group, + name=self.name) + + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError + if response is not None: + self.log("Response : {0}".format(response)) + self.log("Web App instance : {0} found".format(response.name)) + return webapp_to_dict(response) + + except CloudError as ex: + pass + + self.log("Didn't find web app {0} in resource group {1}".format( + self.name, self.resource_group)) + + return False + + def get_app_service_plan(self): + ''' + Gets app service plan + :return: deserialized app service plan dictionary + ''' + self.log("Get App Service Plan {0}".format(self.plan['name'])) + + try: + response = self.web_client.app_service_plans.get( + resource_group_name=self.plan['resource_group'], + name=self.plan['name']) + + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError + if response is not None: + self.log("Response : {0}".format(response)) + self.log("App Service Plan : {0} found".format(response.name)) + + return appserviceplan_to_dict(response) + except CloudError as ex: + pass + + self.log("Didn't find app service plan {0} in resource group {1}".format( + self.plan['name'], self.plan['resource_group'])) + + return False + + def create_app_service_plan(self): + ''' + Creates app service plan + :return: deserialized app service plan dictionary + ''' + self.log("Create App Service Plan {0}".format(self.plan['name'])) + + try: + # normalize sku + sku = _normalize_sku(self.plan['sku']) + + sku_def = SkuDescription(tier=get_sku_name( + sku), name=sku, capacity=(self.plan.get('number_of_workers', None))) + plan_def = AppServicePlan( + location=self.plan['location'], app_service_plan_name=self.plan['name'], sku=sku_def, reserved=(self.plan.get('is_linux', None))) + + poller = self.web_client.app_service_plans.create_or_update( + self.plan['resource_group'], self.plan['name'], plan_def) + + if isinstance(poller, LROPoller): + response = self.get_poller_result(poller) + + self.log("Response : {0}".format(response)) + + return appserviceplan_to_dict(response) + except CloudError as ex: + self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format( + self.plan['name'], self.plan['resource_group'], str(ex))) + + def list_app_settings(self): + ''' + List application settings + :return: deserialized list response + ''' + self.log("List application setting") + + try: + + response = self.web_client.web_apps.list_application_settings( + resource_group_name=self.resource_group, name=self.name) + self.log("Response : {0}".format(response)) + + return response.properties + except CloudError as ex: + self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def update_app_settings(self): + ''' + Update application settings + :return: deserialized updating response + ''' + self.log("Update application setting") + + try: + response = self.web_client.web_apps.update_application_settings( + resource_group_name=self.resource_group, name=self.name, properties=self.app_settings_strDic) + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.fail("Failed to update application settings for web app {0} in 
resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def create_or_update_source_control(self): + ''' + Update site source control + :return: deserialized updating response + ''' + self.log("Update site source control") + + if self.deployment_source is None: + return False + + self.deployment_source['is_manual_integration'] = False + self.deployment_source['is_mercurial'] = False + + try: + response = self.web_client.web_client.create_or_update_source_control( + self.resource_group, self.name, self.deployment_source) + self.log("Response : {0}".format(response)) + + return response.as_dict() + except CloudError as ex: + self.fail("Failed to update site source control for web app {0} in resource group {1}".format( + self.name, self.resource_group)) + + def get_webapp_configuration(self): + ''' + Get web app configuration + :return: deserialized web app configuration response + ''' + self.log("Get web app configuration") + + try: + + response = self.web_client.web_apps.get_configuration( + resource_group_name=self.resource_group, name=self.name) + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.log("Failed to get configuration for web app {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + return False + + def set_webapp_state(self, appstate): + ''' + Start/stop/restart web app + :return: deserialized updating response + ''' + try: + if appstate == 'started': + response = self.web_client.web_apps.start(resource_group_name=self.resource_group, name=self.name) + elif appstate == 'stopped': + response = self.web_client.web_apps.stop(resource_group_name=self.resource_group, name=self.name) + elif appstate == 'restarted': + response = self.web_client.web_apps.restart(resource_group_name=self.resource_group, name=self.name) + else: + self.fail("Invalid web app state {0}".format(appstate)) + + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + request_id = ex.request_id if ex.request_id else '' + self.log("Failed to {0} web app {1} in resource group {2}, request_id {3} - {4}".format( + appstate, self.name, self.resource_group, request_id, str(ex))) + + +def main(): + """Main execution""" + AzureRMWebApps() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_webapp_info.py b/test/support/integration/plugins/modules/azure_rm_webapp_info.py new file mode 100644 index 00000000..22286803 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_webapp_info.py @@ -0,0 +1,489 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_webapp_info + +version_added: "2.9" + +short_description: Get Azure web app facts + +description: + - Get facts for a specific web app or all web app in a resource group, or all web app in current subscription. + +options: + name: + description: + - Only show results for a specific web app. + resource_group: + description: + - Limit results by resource group. + return_publish_profile: + description: + - Indicate whether to return publishing profile of the web app. 
+ default: False + type: bool + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + +extends_documentation_fragment: + - azure + +author: + - Yunge Zhu (@yungezz) +''' + +EXAMPLES = ''' + - name: Get facts for web app by name + azure_rm_webapp_info: + resource_group: myResourceGroup + name: winwebapp1 + + - name: Get facts for web apps in resource group + azure_rm_webapp_info: + resource_group: myResourceGroup + + - name: Get facts for web apps with tags + azure_rm_webapp_info: + tags: + - testtag + - foo:bar +''' + +RETURN = ''' +webapps: + description: + - List of web apps. + returned: always + type: complex + contains: + id: + description: + - ID of the web app. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp + name: + description: + - Name of the web app. + returned: always + type: str + sample: winwebapp1 + resource_group: + description: + - Resource group of the web app. + returned: always + type: str + sample: myResourceGroup + location: + description: + - Location of the web app. + returned: always + type: str + sample: eastus + plan: + description: + - ID of app service plan used by the web app. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppServicePlan + app_settings: + description: + - App settings of the application. Only returned when web app has app settings. + returned: always + type: dict + sample: { + "testkey": "testvalue", + "testkey2": "testvalue2" + } + frameworks: + description: + - Frameworks of the application. Only returned when web app has frameworks. + returned: always + type: list + sample: [ + { + "name": "net_framework", + "version": "v4.0" + }, + { + "name": "java", + "settings": { + "java_container": "tomcat", + "java_container_version": "8.5" + }, + "version": "1.7" + }, + { + "name": "php", + "version": "5.6" + } + ] + availability_state: + description: + - Availability of this web app. + returned: always + type: str + sample: Normal + default_host_name: + description: + - Host name of the web app. + returned: always + type: str + sample: vxxisurg397winapp4.azurewebsites.net + enabled: + description: + - Indicates the web app enabled or not. + returned: always + type: bool + sample: true + enabled_host_names: + description: + - Enabled host names of the web app. + returned: always + type: list + sample: [ + "vxxisurg397winapp4.azurewebsites.net", + "vxxisurg397winapp4.scm.azurewebsites.net" + ] + host_name_ssl_states: + description: + - SSL state per host names of the web app. + returned: always + type: list + sample: [ + { + "hostType": "Standard", + "name": "vxxisurg397winapp4.azurewebsites.net", + "sslState": "Disabled" + }, + { + "hostType": "Repository", + "name": "vxxisurg397winapp4.scm.azurewebsites.net", + "sslState": "Disabled" + } + ] + host_names: + description: + - Host names of the web app. + returned: always + type: list + sample: [ + "vxxisurg397winapp4.azurewebsites.net" + ] + outbound_ip_addresses: + description: + - Outbound IP address of the web app. + returned: always + type: str + sample: "40.71.11.131,40.85.166.200,168.62.166.67,137.135.126.248,137.135.121.45" + ftp_publish_url: + description: + - Publishing URL of the web app when deployment type is FTP. 
+ returned: always + type: str + sample: ftp://xxxx.ftp.azurewebsites.windows.net + state: + description: + - State of the web app. + returned: always + type: str + sample: running + publishing_username: + description: + - Publishing profile user name. + returned: only when I(return_publish_profile=True). + type: str + sample: "$vxxisuRG397winapp4" + publishing_password: + description: + - Publishing profile password. + returned: only when I(return_publish_profile=True). + type: str + sample: "uvANsPQpGjWJmrFfm4Ssd5rpBSqGhjMk11pMSgW2vCsQtNx9tcgZ0xN26s9A" + tags: + description: + - Tags assigned to the resource. Dictionary of string:string pairs. + returned: always + type: dict + sample: { tag1: abc } +''' +try: + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +AZURE_OBJECT_CLASS = 'WebApp' + + +class AzureRMWebAppInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list'), + return_publish_profile=dict(type='bool', default=False), + ) + + self.results = dict( + changed=False, + webapps=[], + ) + + self.name = None + self.resource_group = None + self.tags = None + self.return_publish_profile = False + + self.framework_names = ['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby'] + + super(AzureRMWebAppInfo, self).__init__(self.module_arg_spec, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_webapp_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_webapp_facts' module has been renamed to 'azure_rm_webapp_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name: + self.results['webapps'] = self.list_by_name() + elif self.resource_group: + self.results['webapps'] = self.list_by_resource_group() + else: + self.results['webapps'] = self.list_all() + + return self.results + + def list_by_name(self): + self.log('Get web app {0}'.format(self.name)) + item = None + result = [] + + try: + item = self.web_client.web_apps.get(self.resource_group, self.name) + except CloudError: + pass + + if item and self.has_tags(item.tags, self.tags): + curated_result = self.get_curated_webapp(self.resource_group, self.name, item) + result = [curated_result] + + return result + + def list_by_resource_group(self): + self.log('List web apps in resource groups {0}'.format(self.resource_group)) + try: + response = list(self.web_client.web_apps.list_by_resource_group(self.resource_group)) + except CloudError as exc: + request_id = exc.request_id if exc.request_id else '' + self.fail("Error listing web apps in resource groups {0}, request id: {1} - {2}".format(self.resource_group, request_id, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + curated_output = self.get_curated_webapp(self.resource_group, item.name, item) + results.append(curated_output) + return results + + def list_all(self): + self.log('List web apps in current subscription') + try: + response = list(self.web_client.web_apps.list()) + except CloudError as exc: + request_id = exc.request_id if exc.request_id else '' + self.fail("Error listing web apps, 
request id {0} - {1}".format(request_id, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + curated_output = self.get_curated_webapp(item.resource_group, item.name, item) + results.append(curated_output) + return results + + def list_webapp_configuration(self, resource_group, name): + self.log('Get web app {0} configuration'.format(name)) + + response = [] + + try: + response = self.web_client.web_apps.get_configuration(resource_group_name=resource_group, name=name) + except CloudError as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail('Error getting web app {0} configuration, request id {1} - {2}'.format(name, request_id, str(ex))) + + return response.as_dict() + + def list_webapp_appsettings(self, resource_group, name): + self.log('Get web app {0} app settings'.format(name)) + + response = [] + + try: + response = self.web_client.web_apps.list_application_settings(resource_group_name=resource_group, name=name) + except CloudError as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail('Error getting web app {0} app settings, request id {1} - {2}'.format(name, request_id, str(ex))) + + return response.as_dict() + + def get_publish_credentials(self, resource_group, name): + self.log('Get web app {0} publish credentials'.format(name)) + try: + poller = self.web_client.web_apps.list_publishing_credentials(resource_group, name) + if isinstance(poller, LROPoller): + response = self.get_poller_result(poller) + except CloudError as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail('Error getting web app {0} publishing credentials - {1}'.format(request_id, str(ex))) + return response + + def get_webapp_ftp_publish_url(self, resource_group, name): + import xmltodict + + self.log('Get web app {0} app publish profile'.format(name)) + + url = None + try: + content = self.web_client.web_apps.list_publishing_profile_xml_with_secrets(resource_group_name=resource_group, name=name) + if not content: + return url + + full_xml = '' + for f in content: + full_xml += f.decode() + profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile'] + + if not profiles: + return url + + for profile in profiles: + if profile['@publishMethod'] == 'FTP': + url = profile['@publishUrl'] + + except CloudError as ex: + self.fail('Error getting web app {0} app settings'.format(name)) + + return url + + def get_curated_webapp(self, resource_group, name, webapp): + pip = self.serialize_obj(webapp, AZURE_OBJECT_CLASS) + + try: + site_config = self.list_webapp_configuration(resource_group, name) + app_settings = self.list_webapp_appsettings(resource_group, name) + publish_cred = self.get_publish_credentials(resource_group, name) + ftp_publish_url = self.get_webapp_ftp_publish_url(resource_group, name) + except CloudError as ex: + pass + return self.construct_curated_webapp(webapp=pip, + configuration=site_config, + app_settings=app_settings, + deployment_slot=None, + ftp_publish_url=ftp_publish_url, + publish_credentials=publish_cred) + + def construct_curated_webapp(self, + webapp, + configuration=None, + app_settings=None, + deployment_slot=None, + ftp_publish_url=None, + publish_credentials=None): + curated_output = dict() + curated_output['id'] = webapp['id'] + curated_output['name'] = webapp['name'] + curated_output['resource_group'] = webapp['properties']['resourceGroup'] + curated_output['location'] = webapp['location'] + curated_output['plan'] = webapp['properties']['serverFarmId'] + 
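The FTP publish URL helper above deserializes the publish profile XML through xmltodict, keyed on the @-prefixed attribute names that xml_attribs=True produces. A small sketch of the document shape it expects (the XML below is illustrative, not a real profile):

    import xmltodict

    sample = (
        '<publishData>'
        '<publishProfile publishMethod="MSDeploy" publishUrl="example.scm.azurewebsites.net"/>'
        '<publishProfile publishMethod="FTP" publishUrl="ftp://example.ftp.azurewebsites.windows.net/site/wwwroot"/>'
        '</publishData>'
    )

    profiles = xmltodict.parse(sample, xml_attribs=True)['publishData']['publishProfile']
    # repeated elements arrive as a list of dicts; attributes carry an '@' prefix
    ftp_url = next((p['@publishUrl'] for p in profiles if p['@publishMethod'] == 'FTP'), None)
    print(ftp_url)  # ftp://example.ftp.azurewebsites.windows.net/site/wwwroot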
curated_output['tags'] = webapp.get('tags', None) + + # important properties from output. not match input arguments. + curated_output['app_state'] = webapp['properties']['state'] + curated_output['availability_state'] = webapp['properties']['availabilityState'] + curated_output['default_host_name'] = webapp['properties']['defaultHostName'] + curated_output['host_names'] = webapp['properties']['hostNames'] + curated_output['enabled'] = webapp['properties']['enabled'] + curated_output['enabled_host_names'] = webapp['properties']['enabledHostNames'] + curated_output['host_name_ssl_states'] = webapp['properties']['hostNameSslStates'] + curated_output['outbound_ip_addresses'] = webapp['properties']['outboundIpAddresses'] + + # curated site_config + if configuration: + curated_output['frameworks'] = [] + for fx_name in self.framework_names: + fx_version = configuration.get(fx_name + '_version', None) + if fx_version: + fx = { + 'name': fx_name, + 'version': fx_version + } + # java container setting + if fx_name == 'java': + if configuration['java_container'] and configuration['java_container_version']: + settings = { + 'java_container': configuration['java_container'].lower(), + 'java_container_version': configuration['java_container_version'] + } + fx['settings'] = settings + + curated_output['frameworks'].append(fx) + + # linux_fx_version + if configuration.get('linux_fx_version', None): + tmp = configuration.get('linux_fx_version').split("|") + if len(tmp) == 2: + curated_output['frameworks'].append({'name': tmp[0].lower(), 'version': tmp[1]}) + + # curated app_settings + if app_settings and app_settings.get('properties', None): + curated_output['app_settings'] = dict() + for item in app_settings['properties']: + curated_output['app_settings'][item] = app_settings['properties'][item] + + # curated deploymenet_slot + if deployment_slot: + curated_output['deployment_slot'] = deployment_slot + + # ftp_publish_url + if ftp_publish_url: + curated_output['ftp_publish_url'] = ftp_publish_url + + # curated publish credentials + if publish_credentials and self.return_publish_profile: + curated_output['publishing_username'] = publish_credentials.publishing_user_name + curated_output['publishing_password'] = publish_credentials.publishing_password + return curated_output + + +def main(): + AzureRMWebAppInfo() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_webappslot.py b/test/support/integration/plugins/modules/azure_rm_webappslot.py new file mode 100644 index 00000000..ddba710b --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_webappslot.py @@ -0,0 +1,1058 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_webappslot +version_added: "2.8" +short_description: Manage Azure Web App slot +description: + - Create, update and delete Azure Web App slot. + +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: True + name: + description: + - Unique name of the deployment slot to create or update. + required: True + webapp_name: + description: + - Web app name which this deployment slot belongs to. 
+        required: True
+    location:
+        description:
+            - Resource location. If not set, location from the resource group will be used as default.
+    configuration_source:
+        description:
+            - Source slot to clone configurations from when creating slot. Use webapp's name to refer to the production slot.
+    auto_swap_slot_name:
+        description:
+            - Used to configure target slot name to auto swap, or disable auto swap.
+            - Set it to the target slot name to enable auto swap.
+            - Set it to False to disable auto slot swap.
+    swap:
+        description:
+            - Swap deployment slots of a web app.
+        suboptions:
+            action:
+                description:
+                    - Swap types.
+                    - C(preview) is to apply target slot settings on source slot first.
+                    - C(swap) is to complete swapping.
+                    - C(reset) is to reset the swap.
+                choices:
+                    - preview
+                    - swap
+                    - reset
+                default: preview
+            target_slot:
+                description:
+                    - Name of target slot to swap. If set to None, then swap with production slot.
+            preserve_vnet:
+                description:
+                    - C(True) to preserve virtual network to the slot during swap. Otherwise C(False).
+                type: bool
+                default: True
+    frameworks:
+        description:
+            - Set of run time framework settings. Each setting is a dictionary.
+            - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
+        suboptions:
+            name:
+                description:
+                    - Name of the framework.
+                    - Supported framework list for Windows web app and Linux web app is different.
+                    - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
+                    - Windows web apps support multiple frameworks at the same time.
+                    - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
+                    - Linux web apps support only one framework.
+                    - Java framework is mutually exclusive with others.
+                choices:
+                    - java
+                    - net_framework
+                    - php
+                    - python
+                    - ruby
+                    - dotnetcore
+                    - node
+            version:
+                description:
+                    - Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info.
+                    - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
+                    - C(php) supported value sample, C(5.5), C(5.6), C(7.0).
+                    - C(python) supported value sample, C(5.5), C(5.6), C(7.0).
+                    - C(node) supported value sample, C(6.6), C(6.9).
+                    - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
+                    - C(ruby) supported value sample, C(2.3).
+                    - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
+            settings:
+                description:
+                    - List of settings of the framework.
+                suboptions:
+                    java_container:
+                        description:
+                            - Name of Java container. This is supported by specific framework C(java) only, for example C(Tomcat), C(Jetty).
+                    java_container_version:
+                        description:
+                            - Version of Java container. This is supported by specific framework C(java) only.
+                            - For C(Tomcat), for example C(8.0), C(8.5), C(9.0). For C(Jetty), for example C(9.1), C(9.3).
+    container_settings:
+        description:
+            - Web app slot container settings.
+        suboptions:
+            name:
+                description:
+                    - Name of container, for example C(imagename:tag).
+            registry_server_url:
+                description:
+                    - Container registry server URL, for example C(mydockerregistry.io).
+            registry_server_user:
+                description:
+                    - The container registry server user name.
+            registry_server_password:
+                description:
+                    - The container registry server password.
+    startup_file:
+        description:
+            - The slot startup file.
+            - This only applies for Linux web app slot.
+    app_settings:
+        description:
+            - Configure web app slot application settings. Suboptions are in key value pair format.
+    purge_app_settings:
+        description:
+            - Purge any existing application settings. Replace slot application settings with app_settings.
+        type: bool
+    deployment_source:
+        description:
+            - Deployment source for git.
+        suboptions:
+            url:
+                description:
+                    - Repository URL of deployment source.
+            branch:
+                description:
+                    - The branch name of the repository.
+    app_state:
+        description:
+            - Start/Stop/Restart the slot.
+        type: str
+        choices:
+            - started
+            - stopped
+            - restarted
+        default: started
+    state:
+        description:
+            - State of the Web App deployment slot.
+            - Use C(present) to create or update a slot and C(absent) to delete it.
+        default: present
+        choices:
+            - absent
+            - present
+
+extends_documentation_fragment:
+    - azure
+    - azure_tags
+
+author:
+    - Yunge Zhu (@yungezz)
+
+'''
+
+EXAMPLES = '''
+  - name: Create a webapp slot
+    azure_rm_webappslot:
+      resource_group: myResourceGroup
+      webapp_name: myJavaWebApp
+      name: stage
+      configuration_source: myJavaWebApp
+      app_settings:
+        testkey: testvalue
+
+  - name: Swap the slot with production slot
+    azure_rm_webappslot:
+      resource_group: myResourceGroup
+      webapp_name: myJavaWebApp
+      name: stage
+      swap:
+        action: swap
+
+  - name: Stop the slot
+    azure_rm_webappslot:
+      resource_group: myResourceGroup
+      webapp_name: myJavaWebApp
+      name: stage
+      app_state: stopped
+
+  - name: Update a webapp slot app settings
+    azure_rm_webappslot:
+      resource_group: myResourceGroup
+      webapp_name: myJavaWebApp
+      name: stage
+      app_settings:
+        testkey: testvalue2
+
+  - name: Update a webapp slot frameworks
+    azure_rm_webappslot:
+      resource_group: myResourceGroup
+      webapp_name: myJavaWebApp
+      name: stage
+      frameworks:
+        - name: "node"
+          version: "10.1"
+'''
+
+RETURN = '''
+id:
+    description:
+        - ID of current slot.
+ returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/testapp/slots/stage1 +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from msrest.serialization import Model + from azure.mgmt.web.models import ( + site_config, app_service_plan, Site, + AppServicePlan, SkuDescription, NameValuePair + ) +except ImportError: + # This is handled in azure_rm_common + pass + +swap_spec = dict( + action=dict( + type='str', + choices=[ + 'preview', + 'swap', + 'reset' + ], + default='preview' + ), + target_slot=dict( + type='str' + ), + preserve_vnet=dict( + type='bool', + default=True + ) +) + +container_settings_spec = dict( + name=dict(type='str', required=True), + registry_server_url=dict(type='str'), + registry_server_user=dict(type='str'), + registry_server_password=dict(type='str', no_log=True) +) + +deployment_source_spec = dict( + url=dict(type='str'), + branch=dict(type='str') +) + + +framework_settings_spec = dict( + java_container=dict(type='str', required=True), + java_container_version=dict(type='str', required=True) +) + + +framework_spec = dict( + name=dict( + type='str', + required=True, + choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']), + version=dict(type='str', required=True), + settings=dict(type='dict', options=framework_settings_spec) +) + + +def webapp_to_dict(webapp): + return dict( + id=webapp.id, + name=webapp.name, + location=webapp.location, + client_cert_enabled=webapp.client_cert_enabled, + enabled=webapp.enabled, + reserved=webapp.reserved, + client_affinity_enabled=webapp.client_affinity_enabled, + server_farm_id=webapp.server_farm_id, + host_names_disabled=webapp.host_names_disabled, + https_only=webapp.https_only if hasattr(webapp, 'https_only') else None, + skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None, + ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None, + state=webapp.state, + tags=webapp.tags if webapp.tags else None + ) + + +def slot_to_dict(slot): + return dict( + id=slot.id, + resource_group=slot.resource_group, + server_farm_id=slot.server_farm_id, + target_swap_slot=slot.target_swap_slot, + enabled_host_names=slot.enabled_host_names, + slot_swap_status=slot.slot_swap_status, + name=slot.name, + location=slot.location, + enabled=slot.enabled, + reserved=slot.reserved, + host_names_disabled=slot.host_names_disabled, + state=slot.state, + repository_site_name=slot.repository_site_name, + default_host_name=slot.default_host_name, + kind=slot.kind, + site_config=slot.site_config, + tags=slot.tags if slot.tags else None + ) + + +class Actions: + NoAction, CreateOrUpdate, UpdateAppSettings, Delete = range(4) + + +class AzureRMWebAppSlots(AzureRMModuleBase): + """Configuration class for an Azure RM Web App slot resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + webapp_name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + configuration_source=dict( + type='str' + ), + auto_swap_slot_name=dict( + type='raw' + ), + swap=dict( + type='dict', + options=swap_spec + ), + frameworks=dict( + type='list', + elements='dict', + 
options=framework_spec + ), + container_settings=dict( + type='dict', + options=container_settings_spec + ), + deployment_source=dict( + type='dict', + options=deployment_source_spec + ), + startup_file=dict( + type='str' + ), + app_settings=dict( + type='dict' + ), + purge_app_settings=dict( + type='bool', + default=False + ), + app_state=dict( + type='str', + choices=['started', 'stopped', 'restarted'], + default='started' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + mutually_exclusive = [['container_settings', 'frameworks']] + + self.resource_group = None + self.name = None + self.webapp_name = None + self.location = None + + self.auto_swap_slot_name = None + self.swap = None + self.tags = None + self.startup_file = None + self.configuration_source = None + self.clone = False + + # site config, e.g app settings, ssl + self.site_config = dict() + self.app_settings = dict() + self.app_settings_strDic = None + + # siteSourceControl + self.deployment_source = dict() + + # site, used at level creation, or update. + self.site = None + + # property for internal usage, not used for sdk + self.container_settings = None + + self.purge_app_settings = False + self.app_state = 'started' + + self.results = dict( + changed=False, + id=None, + ) + self.state = None + self.to_do = Actions.NoAction + + self.frameworks = None + + # set site_config value from kwargs + self.site_config_updatable_frameworks = ["net_framework_version", + "java_version", + "php_version", + "python_version", + "linux_fx_version"] + + self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java'] + self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java'] + + super(AzureRMWebAppSlots, self).__init__(derived_arg_spec=self.module_arg_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "scm_type": + self.site_config[key] = kwargs[key] + + old_response = None + response = None + to_be_updated = False + + # set location + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + # get web app + webapp_response = self.get_webapp() + + if not webapp_response: + self.fail("Web app {0} does not exist in resource group {1}.".format(self.webapp_name, self.resource_group)) + + # get slot + old_response = self.get_slot() + + # set is_linux + is_linux = True if webapp_response['reserved'] else False + + if self.state == 'present': + if self.frameworks: + # java is mutually exclusive with other frameworks + if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks): + self.fail('Java is mutually exclusive with other frameworks.') + + if is_linux: + if len(self.frameworks) != 1: + self.fail('Can specify one framework only for Linux web app.') + + if self.frameworks[0]['name'] not in self.supported_linux_frameworks: + self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name'])) + + self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper() + + if self.frameworks[0]['name'] == 'java': + if self.frameworks[0]['version'] != '8': + self.fail("Linux web app only supports java 8.") + + if 
self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \ + self.frameworks[0]['settings']['java_container'].lower() != 'tomcat': + self.fail("Linux web app only supports tomcat container.") + + if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \ + self.frameworks[0]['settings']['java_container'].lower() == 'tomcat': + self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8' + else: + self.site_config['linux_fx_version'] = 'JAVA|8-jre8' + else: + for fx in self.frameworks: + if fx.get('name') not in self.supported_windows_frameworks: + self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name'))) + else: + self.site_config[fx.get('name') + '_version'] = fx.get('version') + + if 'settings' in fx and fx['settings'] is not None: + for key, value in fx['settings'].items(): + self.site_config[key] = value + + if not self.app_settings: + self.app_settings = dict() + + if self.container_settings: + linux_fx_version = 'DOCKER|' + + if self.container_settings.get('registry_server_url'): + self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url'] + + linux_fx_version += self.container_settings['registry_server_url'] + '/' + + linux_fx_version += self.container_settings['name'] + + self.site_config['linux_fx_version'] = linux_fx_version + + if self.container_settings.get('registry_server_user'): + self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user'] + + if self.container_settings.get('registry_server_password'): + self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password'] + + # set auto_swap_slot_name + if self.auto_swap_slot_name and isinstance(self.auto_swap_slot_name, str): + self.site_config['auto_swap_slot_name'] = self.auto_swap_slot_name + if self.auto_swap_slot_name is False: + self.site_config['auto_swap_slot_name'] = None + + # init site + self.site = Site(location=self.location, site_config=self.site_config) + + # check if the slot already present in the webapp + if not old_response: + self.log("Web App slot doesn't exist") + + to_be_updated = True + self.to_do = Actions.CreateOrUpdate + self.site.tags = self.tags + + # if linux, setup startup_file + if self.startup_file: + self.site_config['app_command_line'] = self.startup_file + + # set app setting + if self.app_settings: + app_settings = [] + for key in self.app_settings.keys(): + app_settings.append(NameValuePair(name=key, value=self.app_settings[key])) + + self.site_config['app_settings'] = app_settings + + # clone slot + if self.configuration_source: + self.clone = True + + else: + # existing slot, do update + self.log("Web App slot already exists") + + self.log('Result: {0}'.format(old_response)) + + update_tags, self.site.tags = self.update_tags(old_response.get('tags', None)) + + if update_tags: + to_be_updated = True + + # check if site_config changed + old_config = self.get_configuration_slot(self.name) + + if self.is_site_config_changed(old_config): + to_be_updated = True + self.to_do = Actions.CreateOrUpdate + + self.app_settings_strDic = self.list_app_settings_slot(self.name) + + # purge existing app_settings: + if self.purge_app_settings: + to_be_updated = True + self.to_do = Actions.UpdateAppSettings + self.app_settings_strDic = dict() + + # check if app settings changed + if 
self.purge_app_settings or self.is_app_settings_changed(): + to_be_updated = True + self.to_do = Actions.UpdateAppSettings + + if self.app_settings: + for key in self.app_settings.keys(): + self.app_settings_strDic[key] = self.app_settings[key] + + elif self.state == 'absent': + if old_response: + self.log("Delete Web App slot") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_slot() + + self.log('Web App slot deleted') + + else: + self.log("Web app slot {0} not exists.".format(self.name)) + + if to_be_updated: + self.log('Need to Create/Update web app') + self.results['changed'] = True + + if self.check_mode: + return self.results + + if self.to_do == Actions.CreateOrUpdate: + response = self.create_update_slot() + + self.results['id'] = response['id'] + + if self.clone: + self.clone_slot() + + if self.to_do == Actions.UpdateAppSettings: + self.update_app_settings_slot() + + slot = None + if response: + slot = response + if old_response: + slot = old_response + + if slot: + if (slot['state'] != 'Stopped' and self.app_state == 'stopped') or \ + (slot['state'] != 'Running' and self.app_state == 'started') or \ + self.app_state == 'restarted': + + self.results['changed'] = True + if self.check_mode: + return self.results + + self.set_state_slot(self.app_state) + + if self.swap: + self.results['changed'] = True + if self.check_mode: + return self.results + + self.swap_slot() + + return self.results + + # compare site config + def is_site_config_changed(self, existing_config): + for fx_version in self.site_config_updatable_frameworks: + if self.site_config.get(fx_version): + if not getattr(existing_config, fx_version) or \ + getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper(): + return True + + if self.auto_swap_slot_name is False and existing_config.auto_swap_slot_name is not None: + return True + elif self.auto_swap_slot_name and self.auto_swap_slot_name != getattr(existing_config, 'auto_swap_slot_name', None): + return True + return False + + # comparing existing app setting with input, determine whether it's changed + def is_app_settings_changed(self): + if self.app_settings: + if len(self.app_settings_strDic) != len(self.app_settings): + return True + + if self.app_settings_strDic != self.app_settings: + return True + return False + + # comparing deployment source with input, determine whether it's changed + def is_deployment_source_changed(self, existing_webapp): + if self.deployment_source: + if self.deployment_source.get('url') \ + and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']: + return True + + if self.deployment_source.get('branch') \ + and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']: + return True + + return False + + def create_update_slot(self): + ''' + Creates or updates Web App slot with the specified configuration. 
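+        Any LROPoller returned by the SDK is polled to completion before the result is converted to a dict.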
+ + :return: deserialized Web App instance state dictionary + ''' + self.log( + "Creating / Updating the Web App slot {0}".format(self.name)) + + try: + response = self.web_client.web_apps.create_or_update_slot(resource_group_name=self.resource_group, + slot=self.name, + name=self.webapp_name, + site_envelope=self.site) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except CloudError as exc: + self.log('Error attempting to create the Web App slot instance.') + self.fail("Error creating the Web App slot: {0}".format(str(exc))) + return slot_to_dict(response) + + def delete_slot(self): + ''' + Deletes specified Web App slot in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Web App slot {0}".format(self.name)) + try: + response = self.web_client.web_apps.delete_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name) + except CloudError as e: + self.log('Error attempting to delete the Web App slot.') + self.fail( + "Error deleting the Web App slots: {0}".format(str(e))) + + return True + + def get_webapp(self): + ''' + Gets the properties of the specified Web App. + + :return: deserialized Web App instance state dictionary + ''' + self.log( + "Checking if the Web App instance {0} is present".format(self.webapp_name)) + + response = None + + try: + response = self.web_client.web_apps.get(resource_group_name=self.resource_group, + name=self.webapp_name) + + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError + if response is not None: + self.log("Response : {0}".format(response)) + self.log("Web App instance : {0} found".format(response.name)) + return webapp_to_dict(response) + + except CloudError as ex: + pass + + self.log("Didn't find web app {0} in resource group {1}".format( + self.webapp_name, self.resource_group)) + + return False + + def get_slot(self): + ''' + Gets the properties of the specified Web App slot. 
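+        Returns False when the slot cannot be found, mirroring get_webapp().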
+
+        :return: deserialized Web App slot state dictionary
+        '''
+        self.log(
+            "Checking if the Web App slot {0} is present".format(self.name))
+
+        response = None
+
+        try:
+            response = self.web_client.web_apps.get_slot(resource_group_name=self.resource_group,
+                                                         name=self.webapp_name,
+                                                         slot=self.name)
+
+            # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
+            if response is not None:
+                self.log("Response : {0}".format(response))
+                self.log("Web App slot: {0} found".format(response.name))
+                return slot_to_dict(response)
+
+        except CloudError as ex:
+            pass
+
+        self.log("Did not find web app slot {0} in resource group {1}".format(self.name, self.resource_group))
+
+        return False
+
+    def list_app_settings(self):
+        '''
+        List webapp application settings
+        :return: deserialized list response
+        '''
+        self.log("List webapp application setting")
+
+        try:
+
+            response = self.web_client.web_apps.list_application_settings(
+                resource_group_name=self.resource_group, name=self.webapp_name)
+            self.log("Response : {0}".format(response))
+
+            return response.properties
+        except CloudError as ex:
+            self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
+                self.name, self.resource_group, str(ex)))
+
+    def list_app_settings_slot(self, slot_name):
+        '''
+        List application settings
+        :return: deserialized list response
+        '''
+        self.log("List application setting")
+
+        try:
+
+            response = self.web_client.web_apps.list_application_settings_slot(
+                resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
+            self.log("Response : {0}".format(response))
+
+            return response.properties
+        except CloudError as ex:
+            self.fail("Failed to list application settings for web app slot {0} in resource group {1}: {2}".format(
+                self.name, self.resource_group, str(ex)))
+
+    def update_app_settings_slot(self, slot_name=None, app_settings=None):
+        '''
+        Update application settings
+        :return: deserialized updating response
+        '''
+        self.log("Update application setting")
+
+        if slot_name is None:
+            slot_name = self.name
+        if app_settings is None:
+            app_settings = self.app_settings_strDic
+        try:
+            response = self.web_client.web_apps.update_application_settings_slot(resource_group_name=self.resource_group,
+                                                                                 name=self.webapp_name,
+                                                                                 slot=slot_name,
+                                                                                 kind=None,
+                                                                                 properties=app_settings)
+            self.log("Response : {0}".format(response))
+
+            return response.as_dict()
+        except CloudError as ex:
+            self.fail("Failed to update application settings for web app slot {0} in resource group {1}: {2}".format(
+                self.name, self.resource_group, str(ex)))
+
+    def create_or_update_source_control_slot(self):
+        '''
+        Update site source control
+        :return: deserialized updating response
+        '''
+        self.log("Update site source control")
+
+        if self.deployment_source is None:
+            return False
+
+        self.deployment_source['is_manual_integration'] = False
+        self.deployment_source['is_mercurial'] = False
+
+        try:
+            response = self.web_client.web_apps.create_or_update_source_control_slot(
+                resource_group_name=self.resource_group,
+                name=self.webapp_name,
+                site_source_control=self.deployment_source,
+                slot=self.name)
+            self.log("Response : {0}".format(response))
+
+            return response.as_dict()
+        except CloudError as ex:
+            self.fail("Failed to update site source control for web app slot {0} in resource group {1}: {2}".format(
+                self.name, self.resource_group, str(ex)))
+
+    def get_configuration(self):
+        '''
+        Get web app configuration
+
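+        Used by clone_slot() when the configuration source is the production site.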
:return: deserialized web app configuration response + ''' + self.log("Get web app configuration") + + try: + + response = self.web_client.web_apps.get_configuration( + resource_group_name=self.resource_group, name=self.webapp_name) + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.fail("Failed to get configuration for web app {0} in resource group {1}: {2}".format( + self.webapp_name, self.resource_group, str(ex))) + + def get_configuration_slot(self, slot_name): + ''' + Get slot configuration + :return: deserialized slot configuration response + ''' + self.log("Get web app slot configuration") + + try: + + response = self.web_client.web_apps.get_configuration_slot( + resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name) + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.fail("Failed to get configuration for web app slot {0} in resource group {1}: {2}".format( + slot_name, self.resource_group, str(ex))) + + def update_configuration_slot(self, slot_name=None, site_config=None): + ''' + Update slot configuration + :return: deserialized slot configuration response + ''' + self.log("Update web app slot configuration") + + if slot_name is None: + slot_name = self.name + if site_config is None: + site_config = self.site_config + try: + + response = self.web_client.web_apps.update_configuration_slot( + resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name, site_config=site_config) + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.fail("Failed to update configuration for web app slot {0} in resource group {1}: {2}".format( + slot_name, self.resource_group, str(ex))) + + def set_state_slot(self, appstate): + ''' + Start/stop/restart web app slot + :return: deserialized updating response + ''' + try: + if appstate == 'started': + response = self.web_client.web_apps.start_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name) + elif appstate == 'stopped': + response = self.web_client.web_apps.stop_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name) + elif appstate == 'restarted': + response = self.web_client.web_apps.restart_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name) + else: + self.fail("Invalid web app slot state {0}".format(appstate)) + + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail("Failed to {0} web app slot {1} in resource group {2}, request_id {3} - {4}".format( + appstate, self.name, self.resource_group, request_id, str(ex))) + + def swap_slot(self): + ''' + Swap slot + :return: deserialized response + ''' + self.log("Swap slot") + + try: + if self.swap['action'] == 'swap': + if self.swap['target_slot'] is None: + response = self.web_client.web_apps.swap_slot_with_production(resource_group_name=self.resource_group, + name=self.webapp_name, + target_slot=self.name, + preserve_vnet=self.swap['preserve_vnet']) + else: + response = self.web_client.web_apps.swap_slot_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name, + target_slot=self.swap['target_slot'], + preserve_vnet=self.swap['preserve_vnet']) + elif self.swap['action'] == 'preview': + if self.swap['target_slot'] is None: + response = 
self.web_client.web_apps.apply_slot_config_to_production(resource_group_name=self.resource_group, + name=self.webapp_name, + target_slot=self.name, + preserve_vnet=self.swap['preserve_vnet']) + else: + response = self.web_client.web_apps.apply_slot_configuration_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name, + target_slot=self.swap['target_slot'], + preserve_vnet=self.swap['preserve_vnet']) + elif self.swap['action'] == 'reset': + if self.swap['target_slot'] is None: + response = self.web_client.web_apps.reset_production_slot_config(resource_group_name=self.resource_group, + name=self.webapp_name) + else: + response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.swap['target_slot']) + response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name) + + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.fail("Failed to swap web app slot {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex))) + + def clone_slot(self): + if self.configuration_source: + src_slot = None if self.configuration_source.lower() == self.webapp_name.lower() else self.configuration_source + + if src_slot is None: + site_config_clone_from = self.get_configuration() + else: + site_config_clone_from = self.get_configuration_slot(slot_name=src_slot) + + self.update_configuration_slot(site_config=site_config_clone_from) + + if src_slot is None: + app_setting_clone_from = self.list_app_settings() + else: + app_setting_clone_from = self.list_app_settings_slot(src_slot) + + if self.app_settings: + app_setting_clone_from.update(self.app_settings) + + self.update_app_settings_slot(app_settings=app_setting_clone_from) + + +def main(): + """Main execution""" + AzureRMWebAppSlots() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/cloud_init_data_facts.py b/test/support/integration/plugins/modules/cloud_init_data_facts.py new file mode 100644 index 00000000..4f871b99 --- /dev/null +++ b/test/support/integration/plugins/modules/cloud_init_data_facts.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, René Moser <mail@renemoser.net> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cloud_init_data_facts +short_description: Retrieve facts of cloud-init. +description: + - Gathers facts by reading the status.json and result.json of cloud-init. +version_added: 2.6 +author: René Moser (@resmo) +options: + filter: + description: + - Filter facts + choices: [ status, result ] +notes: + - See http://cloudinit.readthedocs.io/ for more information about cloud-init. 
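+  - Facts are read from C(status.json) and C(result.json) under C(/var/lib/cloud/data/); missing files simply yield empty facts.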
+''' + +EXAMPLES = ''' +- name: Gather all facts of cloud init + cloud_init_data_facts: + register: result + +- debug: + var: result + +- name: Wait for cloud init to finish + cloud_init_data_facts: + filter: status + register: res + until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage" + retries: 50 + delay: 5 +''' + +RETURN = ''' +--- +cloud_init_data_facts: + description: Facts of result and status. + returned: success + type: dict + sample: '{ + "status": { + "v1": { + "datasource": "DataSourceCloudStack", + "errors": [] + }, + "result": { + "v1": { + "datasource": "DataSourceCloudStack", + "init": { + "errors": [], + "finished": 1522066377.0185432, + "start": 1522066375.2648022 + }, + "init-local": { + "errors": [], + "finished": 1522066373.70919, + "start": 1522066373.4726632 + }, + "modules-config": { + "errors": [], + "finished": 1522066380.9097016, + "start": 1522066379.0011985 + }, + "modules-final": { + "errors": [], + "finished": 1522066383.56594, + "start": 1522066382.3449218 + }, + "stage": null + } + }' +''' + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_text + + +CLOUD_INIT_PATH = "/var/lib/cloud/data/" + + +def gather_cloud_init_data_facts(module): + res = { + 'cloud_init_data_facts': dict() + } + + for i in ['result', 'status']: + filter = module.params.get('filter') + if filter is None or filter == i: + res['cloud_init_data_facts'][i] = dict() + json_file = CLOUD_INIT_PATH + i + '.json' + + if os.path.exists(json_file): + f = open(json_file, 'rb') + contents = to_text(f.read(), errors='surrogate_or_strict') + f.close() + + if contents: + res['cloud_init_data_facts'][i] = module.from_json(contents) + return res + + +def main(): + module = AnsibleModule( + argument_spec=dict( + filter=dict(choices=['result', 'status']), + ), + supports_check_mode=True, + ) + + facts = gather_cloud_init_data_facts(module) + result = dict(changed=False, ansible_facts=facts, **facts) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/cloudformation.py b/test/support/integration/plugins/modules/cloudformation.py new file mode 100644 index 00000000..cd031465 --- /dev/null +++ b/test/support/integration/plugins/modules/cloudformation.py @@ -0,0 +1,837 @@ +#!/usr/bin/python + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: cloudformation +short_description: Create or delete an AWS CloudFormation stack +description: + - Launches or updates an AWS CloudFormation stack and waits for it complete. +notes: + - CloudFormation features change often, and this module tries to keep up. That means your botocore version should be fresh. + The version listed in the requirements is the oldest version that works with the module as a whole. + Some features may require recent versions, and we do not pinpoint a minimum version for each feature. + Instead of relying on the minimum version, keep botocore up to date. AWS is always releasing features and fixing bugs. +version_added: "1.1" +options: + stack_name: + description: + - Name of the CloudFormation stack. 
+    required: true
+    type: str
+  disable_rollback:
+    description:
+      - If a stack fails to form, rollback will remove the stack.
+    default: false
+    type: bool
+  on_create_failure:
+    description:
+      - Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option.
+    choices:
+      - DO_NOTHING
+      - ROLLBACK
+      - DELETE
+    version_added: "2.8"
+    type: str
+  create_timeout:
+    description:
+      - The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED.
+    version_added: "2.6"
+    type: int
+  template_parameters:
+    description:
+      - A list of hashes of all the template variables for the stack. The value can be a string or a dict.
+      - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
+    default: {}
+    type: dict
+  state:
+    description:
+      - If I(state=present), stack will be created.
+      - If I(state=present) and if stack exists and template has changed, it will be updated.
+      - If I(state=absent), stack will be removed.
+    default: present
+    choices: [ present, absent ]
+    type: str
+  template:
+    description:
+      - The local path of the CloudFormation template.
+      - This must be the full path to the file, relative to the working directory. If using roles this may look
+        like C(roles/cloudformation/files/cloudformation-example.json).
+      - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+        must be specified (but only one of them).
+      - If I(state=present), the stack does exist, and neither I(template),
+        I(template_body) nor I(template_url) are specified, the previous template will be reused.
+    type: path
+  notification_arns:
+    description:
+      - A comma separated list of Simple Notification Service (SNS) topic ARNs to publish stack related events.
+    version_added: "2.0"
+    type: str
+  stack_policy:
+    description:
+      - The path of the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified.
+        For instance, allow all updates U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
+    version_added: "1.9"
+    type: str
+  tags:
+    description:
+      - Dictionary of tags to associate with stack and its resources during stack creation.
+      - Can be updated later; updating tags removes previous entries.
+    version_added: "1.4"
+    type: dict
+  template_url:
+    description:
+      - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an
+        S3 bucket in the same region as the stack.
+      - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+        must be specified (but only one of them).
+      - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified,
+        the previous template will be reused.
+    version_added: "2.0"
+    type: str
+  create_changeset:
+    description:
+      - "If the stack already exists, create a changeset instead of directly applying changes. See the AWS Change Sets docs
+        U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)."
+      - "WARNING: if the stack does not exist, it will be created without a changeset. If I(state=absent), the stack will be
+        deleted immediately with no changeset."
+    type: bool
+    default: false
+    version_added: "2.4"
+  changeset_name:
+    description:
+      - Name given to the changeset when creating a changeset.
+      - Only used when I(create_changeset=true).
+ - By default a name prefixed with Ansible-STACKNAME is generated based on input parameters. + See the AWS Change Sets docs for more information + U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html) + version_added: "2.4" + type: str + template_format: + description: + - This parameter is ignored since Ansible 2.3 and will be removed in Ansible 2.14. + - Templates are now passed raw to CloudFormation regardless of format. + version_added: "2.0" + type: str + role_arn: + description: + - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role + docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html) + version_added: "2.3" + type: str + termination_protection: + description: + - Enable or disable termination protection on the stack. Only works with botocore >= 1.7.18. + type: bool + version_added: "2.5" + template_body: + description: + - Template body. Use this to pass in the actual body of the CloudFormation template. + - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url) + must be specified (but only one of them). + - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) + are specified, the previous template will be reused. + version_added: "2.5" + type: str + events_limit: + description: + - Maximum number of CloudFormation events to fetch from a stack when creating or updating it. + default: 200 + version_added: "2.7" + type: int + backoff_delay: + description: + - Number of seconds to wait for the next retry. + default: 3 + version_added: "2.8" + type: int + required: False + backoff_max_delay: + description: + - Maximum amount of time to wait between retries. + default: 30 + version_added: "2.8" + type: int + required: False + backoff_retries: + description: + - Number of times to retry operation. + - AWS API throttling mechanism fails CloudFormation module so we have to retry a couple of times. + default: 10 + version_added: "2.8" + type: int + required: False + capabilities: + description: + - Specify capabilities that stack template contains. + - Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND). + type: list + elements: str + version_added: "2.8" + default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ] + +author: "James S. 
Martin (@jsmartin)" +extends_documentation_fragment: +- aws +- ec2 +requirements: [ boto3, botocore>=1.5.45 ] +''' + +EXAMPLES = ''' +- name: create a cloudformation stack + cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + disable_rollback: true + template: "files/cloudformation-example.json" + template_parameters: + KeyName: "jmartin" + DiskType: "ephemeral" + InstanceType: "m1.small" + ClusterSize: 3 + tags: + Stack: "ansible-cloudformation" + +# Basic role example +- name: create a stack, specify role that cloudformation assumes + cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + disable_rollback: true + template: "roles/cloudformation/files/cloudformation-example.json" + role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role' + +- name: delete a stack + cloudformation: + stack_name: "ansible-cloudformation-old" + state: "absent" + +# Create a stack, pass in template from a URL, disable rollback if stack creation fails, +# pass in some parameters to the template, provide tags for resources created +- name: create a stack, pass in the template via an URL + cloudformation: + stack_name: "ansible-cloudformation" + state: present + region: us-east-1 + disable_rollback: true + template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template + template_parameters: + KeyName: jmartin + DiskType: ephemeral + InstanceType: m1.small + ClusterSize: 3 + tags: + Stack: ansible-cloudformation + +# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails, +# pass in some parameters to the template, provide tags for resources created +- name: create a stack, pass in the template body via lookup template + cloudformation: + stack_name: "ansible-cloudformation" + state: present + region: us-east-1 + disable_rollback: true + template_body: "{{ lookup('template', 'cloudformation.j2') }}" + template_parameters: + KeyName: jmartin + DiskType: ephemeral + InstanceType: m1.small + ClusterSize: 3 + tags: + Stack: ansible-cloudformation + +# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute +# When use_previous_value is set to True, the given value will be ignored and +# CloudFormation will use the value from a previously submitted template. +# If use_previous_value is set to False (default) the given value is used. +- cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + template: "files/cloudformation-example.json" + template_parameters: + DBSnapshotIdentifier: + use_previous_value: True + value: arn:aws:rds:es-east-1:000000000000:snapshot:rds:my-db-snapshot + DBName: + use_previous_value: True + tags: + Stack: "ansible-cloudformation" + +# Enable termination protection on a stack. +# If the stack already exists, this will update its termination protection +- name: enable termination protection during stack creation + cloudformation: + stack_name: my_stack + state: present + template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template + termination_protection: yes + +# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED +# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back. 
+- name: create a stack with a creation timeout
+  cloudformation:
+    stack_name: my_stack
+    state: present
+    template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+    create_timeout: 5
+
+# Configure rollback behaviour on the unsuccessful creation of a stack allowing
+# CloudFormation to clean up, or do nothing in the event of an unsuccessful
+# deployment
+# In this case, if on_create_failure is set to "DELETE", it will clean up the stack if
+# it fails to create
+- name: create stack which will delete on creation failure
+  cloudformation:
+    stack_name: my_stack
+    state: present
+    template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+    on_create_failure: DELETE
+'''
+
+RETURN = '''
+events:
+  type: list
+  description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
+  returned: always
+  sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
+log:
+  description: Debugging logs. Useful when modifying or finding an error.
+  returned: always
+  type: list
+  sample: ["updating stack"]
+change_set_id:
+  description: The ID of the stack change set if one was created.
+  returned: I(state=present) and I(create_changeset=true)
+  type: str
+  sample: "arn:aws:cloudformation:us-east-1:012345678901:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0"
+stack_resources:
+  description: AWS stack resources and their status. List of dictionaries, one dict per resource.
+  returned: state == present
+  type: list
+  sample: [
+          {
+              "last_updated_time": "2016-10-11T19:40:14.979000+00:00",
+              "logical_resource_id": "CFTestSg",
+              "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
+              "resource_type": "AWS::EC2::SecurityGroup",
+              "status": "UPDATE_COMPLETE",
+              "status_reason": null
+          }
+      ]
+stack_outputs:
+  type: dict
+  description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
+  returned: state == present
+  sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
+''' # NOQA
+
+import json
+import time
+import uuid
+import traceback
+from hashlib import sha1
+
+try:
+    import boto3
+    import botocore
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, boto3_conn, boto_exception, ec2_argument_spec, get_aws_connection_info
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
+    '''This event data was never correct, it worked as a side effect. So the v2.3 format is different.'''
+    ret = {'events': [], 'log': []}
+
+    try:
+        pg = cfn.get_paginator(
+            'describe_stack_events'
+        ).paginate(
+            StackName=stack_name,
+            PaginationConfig={'MaxItems': events_limit}
+        )
+        if token_filter is not None:
+            events = list(pg.search(
+                "StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
+            ))
+        else:
+            events = list(pg.search("StackEvents[*]"))
+    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+        error_msg = boto_exception(err)
+        if 'does not exist' in error_msg:
+            # missing stack, don't bail.
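+            # this is expected when polling a DELETE operation: the stack vanishes
+            # once deletion finishes, so record it and return the events gathered so far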
+ ret['log'].append('Stack does not exist.') + return ret + ret['log'].append('Unknown error: ' + str(error_msg)) + return ret + + for e in events: + eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e) + ret['events'].append(eventline) + + if e['ResourceStatus'].endswith('FAILED'): + failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e) + ret['log'].append(failline) + + return ret + + +def create_stack(module, stack_params, cfn, events_limit): + if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: + module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.") + + # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and + # 'OnFailure' only apply on creation, not update. + if module.params.get('on_create_failure') is not None: + stack_params['OnFailure'] = module.params['on_create_failure'] + else: + stack_params['DisableRollback'] = module.params['disable_rollback'] + + if module.params.get('create_timeout') is not None: + stack_params['TimeoutInMinutes'] = module.params['create_timeout'] + if module.params.get('termination_protection') is not None: + if boto_supports_termination_protection(cfn): + stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection')) + else: + module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18") + + try: + response = cfn.create_stack(**stack_params) + # Use stack ID to follow stack state in case of on_create_failure = DELETE + result = stack_operation(cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None)) + except Exception as err: + error_msg = boto_exception(err) + module.fail_json(msg="Failed to create stack {0}: {1}.".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc()) + if not result: + module.fail_json(msg="empty result") + return result + + +def list_changesets(cfn, stack_name): + res = cfn.list_change_sets(StackName=stack_name) + return [cs['ChangeSetName'] for cs in res['Summaries']] + + +def create_changeset(module, stack_params, cfn, events_limit): + if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: + module.fail_json(msg="Either 'template' or 'template_url' is required.") + if module.params['changeset_name'] is not None: + stack_params['ChangeSetName'] = module.params['changeset_name'] + + # changesets don't accept ClientRequestToken parameters + stack_params.pop('ClientRequestToken', None) + + try: + changeset_name = build_changeset_name(stack_params) + stack_params['ChangeSetName'] = changeset_name + + # Determine if this changeset already exists + pending_changesets = list_changesets(cfn, stack_params['StackName']) + if changeset_name in pending_changesets: + warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets) + result = dict(changed=False, output='ChangeSet %s already exists.' 
% changeset_name, warnings=[warning]) + else: + cs = cfn.create_change_set(**stack_params) + # Make sure we don't enter an infinite loop + time_end = time.time() + 600 + while time.time() < time_end: + try: + newcs = cfn.describe_change_set(ChangeSetName=cs['Id']) + except botocore.exceptions.BotoCoreError as err: + error_msg = boto_exception(err) + module.fail_json(msg=error_msg) + if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS': + time.sleep(1) + elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']: + cfn.delete_change_set(ChangeSetName=cs['Id']) + result = dict(changed=False, + output='The created Change Set did not contain any changes to this stack and was deleted.') + # a failed change set does not trigger any stack events so we just want to + # skip any further processing of result and just return it directly + return result + else: + break + # Lets not hog the cpu/spam the AWS API + time.sleep(1) + result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit) + result['change_set_id'] = cs['Id'] + result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']), + 'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'], + 'NOTE that dependencies on this stack might fail due to pending changes!'] + except Exception as err: + error_msg = boto_exception(err) + if 'No updates are to be performed.' in error_msg: + result = dict(changed=False, output='Stack is already up-to-date.') + else: + module.fail_json(msg="Failed to create change set: {0}".format(error_msg), exception=traceback.format_exc()) + + if not result: + module.fail_json(msg="empty result") + return result + + +def update_stack(module, stack_params, cfn, events_limit): + if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: + stack_params['UsePreviousTemplate'] = True + + # if the state is present and the stack already exists, we try to update it. + # AWS will tell us if the stack template and parameters are the same and + # don't need to be updated. + try: + cfn.update_stack(**stack_params) + result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None)) + except Exception as err: + error_msg = boto_exception(err) + if 'No updates are to be performed.' 
in error_msg: + result = dict(changed=False, output='Stack is already up-to-date.') + else: + module.fail_json(msg="Failed to update stack {0}: {1}".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc()) + if not result: + module.fail_json(msg="empty result") + return result + + +def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state): + '''updates termination protection of a stack''' + if not boto_supports_termination_protection(cfn): + module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18") + stack = get_stack_facts(cfn, stack_name) + if stack: + if stack['EnableTerminationProtection'] is not desired_termination_protection_state: + try: + cfn.update_termination_protection( + EnableTerminationProtection=desired_termination_protection_state, + StackName=stack_name) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=boto_exception(e), exception=traceback.format_exc()) + + +def boto_supports_termination_protection(cfn): + '''termination protection was added in botocore 1.7.18''' + return hasattr(cfn, "update_termination_protection") + + +def stack_operation(cfn, stack_name, operation, events_limit, op_token=None): + '''gets the status of a stack while it is created/updated/deleted''' + existed = [] + while True: + try: + stack = get_stack_facts(cfn, stack_name) + existed.append('yes') + except Exception: + # If the stack previously existed, and now can't be found then it's + # been deleted successfully. + if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. + ret = get_stack_events(cfn, stack_name, events_limit, op_token) + ret.update({'changed': True, 'output': 'Stack Deleted'}) + return ret + else: + return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()} + ret = get_stack_events(cfn, stack_name, events_limit, op_token) + if not stack: + if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. + ret = get_stack_events(cfn, stack_name, events_limit, op_token) + ret.update({'changed': True, 'output': 'Stack Deleted'}) + return ret + else: + ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'}) + return ret + # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE + # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13 + elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET': + ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation}) + return ret + elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE': + ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'}) + return ret + # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases. + elif stack['StackStatus'].endswith('_COMPLETE'): + ret.update({'changed': True, 'output': 'Stack %s complete' % operation}) + return ret + elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'): + ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation}) + return ret + # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases. 
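+        # any remaining *_FAILED status (CREATE_FAILED, DELETE_FAILED, UPDATE_FAILED)
+        # is terminal, so stop polling and report the operation as failed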
+ elif stack['StackStatus'].endswith('_FAILED'): + ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation}) + return ret + else: + # this can loop forever :/ + time.sleep(5) + return {'failed': True, 'output': 'Failed for unknown reasons.'} + + +def build_changeset_name(stack_params): + if 'ChangeSetName' in stack_params: + return stack_params['ChangeSetName'] + + json_params = json.dumps(stack_params, sort_keys=True) + + return 'Ansible-{0}-{1}'.format( + stack_params['StackName'], + sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest() + ) + + +def check_mode_changeset(module, stack_params, cfn): + """Create a change set, describe it and delete it before returning check mode outputs.""" + stack_params['ChangeSetName'] = build_changeset_name(stack_params) + # changesets don't accept ClientRequestToken parameters + stack_params.pop('ClientRequestToken', None) + + try: + change_set = cfn.create_change_set(**stack_params) + for i in range(60): # total time 5 min + description = cfn.describe_change_set(ChangeSetName=change_set['Id']) + if description['Status'] in ('CREATE_COMPLETE', 'FAILED'): + break + time.sleep(5) + else: + # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail + module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName']) + + cfn.delete_change_set(ChangeSetName=change_set['Id']) + + reason = description.get('StatusReason') + + if description['Status'] == 'FAILED' and "didn't contain changes" in description['StatusReason']: + return {'changed': False, 'msg': reason, 'meta': description['StatusReason']} + return {'changed': True, 'msg': reason, 'meta': description['Changes']} + + except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: + error_msg = boto_exception(err) + module.fail_json(msg=error_msg, exception=traceback.format_exc()) + + +def get_stack_facts(cfn, stack_name): + try: + stack_response = cfn.describe_stacks(StackName=stack_name) + stack_info = stack_response['Stacks'][0] + except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: + error_msg = boto_exception(err) + if 'does not exist' in error_msg: + # missing stack, don't bail. + return None + + # other error, bail. 
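+        # callers such as stack_operation() rely on this exception to detect
+        # a stack that can no longer be described mid-operation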
+ raise err + + if stack_response and stack_response.get('Stacks', None): + stacks = stack_response['Stacks'] + if len(stacks): + stack_info = stacks[0] + + return stack_info + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + stack_name=dict(required=True), + template_parameters=dict(required=False, type='dict', default={}), + state=dict(default='present', choices=['present', 'absent']), + template=dict(default=None, required=False, type='path'), + notification_arns=dict(default=None, required=False), + stack_policy=dict(default=None, required=False), + disable_rollback=dict(default=False, type='bool'), + on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']), + create_timeout=dict(default=None, type='int'), + template_url=dict(default=None, required=False), + template_body=dict(default=None, required=False), + template_format=dict(removed_in_version='2.14'), + create_changeset=dict(default=False, type='bool'), + changeset_name=dict(default=None, required=False), + role_arn=dict(default=None, required=False), + tags=dict(default=None, type='dict'), + termination_protection=dict(default=None, type='bool'), + events_limit=dict(default=200, type='int'), + backoff_retries=dict(type='int', default=10, required=False), + backoff_delay=dict(type='int', default=3, required=False), + backoff_max_delay=dict(type='int', default=30, required=False), + capabilities=dict(type='list', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[['template_url', 'template', 'template_body'], + ['disable_rollback', 'on_create_failure']], + supports_check_mode=True + ) + if not HAS_BOTO3: + module.fail_json(msg='boto3 and botocore are required for this module') + + invalid_capabilities = [] + user_capabilities = module.params.get('capabilities') + for user_cap in user_capabilities: + if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']: + invalid_capabilities.append(user_cap) + + if invalid_capabilities: + module.fail_json(msg="Specified capabilities are invalid : %r," + " please check documentation for valid capabilities" % invalid_capabilities) + + # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. + stack_params = { + 'Capabilities': user_capabilities, + 'ClientRequestToken': to_native(uuid.uuid4()), + } + state = module.params['state'] + stack_params['StackName'] = module.params['stack_name'] + + if module.params['template'] is not None: + with open(module.params['template'], 'r') as template_fh: + stack_params['TemplateBody'] = template_fh.read() + elif module.params['template_body'] is not None: + stack_params['TemplateBody'] = module.params['template_body'] + elif module.params['template_url'] is not None: + stack_params['TemplateURL'] = module.params['template_url'] + + if module.params.get('notification_arns'): + stack_params['NotificationARNs'] = module.params['notification_arns'].split(',') + else: + stack_params['NotificationARNs'] = [] + + # can't check the policy when verifying. 
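+    # changesets and check mode never send StackPolicyBody, hence the extra guards below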
+ if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']: + with open(module.params['stack_policy'], 'r') as stack_policy_fh: + stack_params['StackPolicyBody'] = stack_policy_fh.read() + + template_parameters = module.params['template_parameters'] + + stack_params['Parameters'] = [] + for k, v in template_parameters.items(): + if isinstance(v, dict): + # set parameter based on a dict to allow additional CFN Parameter Attributes + param = dict(ParameterKey=k) + + if 'value' in v: + param['ParameterValue'] = str(v['value']) + + if 'use_previous_value' in v and bool(v['use_previous_value']): + param['UsePreviousValue'] = True + param.pop('ParameterValue', None) + + stack_params['Parameters'].append(param) + else: + # allow default k/v configuration to set a template parameter + stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)}) + + if isinstance(module.params.get('tags'), dict): + stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags']) + + if module.params.get('role_arn'): + stack_params['RoleARN'] = module.params['role_arn'] + + result = {} + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + cfn = boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg=boto_exception(e)) + + # Wrap the cloudformation client methods that this module uses with + # automatic backoff / retry for throttling error codes + backoff_wrapper = AWSRetry.jittered_backoff( + retries=module.params.get('backoff_retries'), + delay=module.params.get('backoff_delay'), + max_delay=module.params.get('backoff_max_delay') + ) + cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events) + cfn.create_stack = backoff_wrapper(cfn.create_stack) + cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets) + cfn.create_change_set = backoff_wrapper(cfn.create_change_set) + cfn.update_stack = backoff_wrapper(cfn.update_stack) + cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks) + cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources) + cfn.delete_stack = backoff_wrapper(cfn.delete_stack) + if boto_supports_termination_protection(cfn): + cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection) + + stack_info = get_stack_facts(cfn, stack_params['StackName']) + + if module.check_mode: + if state == 'absent' and stack_info: + module.exit_json(changed=True, msg='Stack would be deleted', meta=[]) + elif state == 'absent' and not stack_info: + module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[]) + elif state == 'present' and not stack_info: + module.exit_json(changed=True, msg='New stack would be created', meta=[]) + else: + module.exit_json(**check_mode_changeset(module, stack_params, cfn)) + + if state == 'present': + if not stack_info: + result = create_stack(module, stack_params, cfn, module.params.get('events_limit')) + elif module.params.get('create_changeset'): + result = create_changeset(module, stack_params, cfn, module.params.get('events_limit')) + else: + if module.params.get('termination_protection') is not None: + update_termination_protection(module, cfn, stack_params['StackName'], + bool(module.params.get('termination_protection'))) + result = update_stack(module, stack_params, cfn, module.params.get('events_limit')) + + # format the stack output + + stack = 
get_stack_facts(cfn, stack_params['StackName']) + if stack is not None: + if result.get('stack_outputs') is None: + # always define stack_outputs, but it may be empty + result['stack_outputs'] = {} + for output in stack.get('Outputs', []): + result['stack_outputs'][output['OutputKey']] = output['OutputValue'] + stack_resources = [] + reslist = cfn.list_stack_resources(StackName=stack_params['StackName']) + for res in reslist.get('StackResourceSummaries', []): + stack_resources.append({ + "logical_resource_id": res['LogicalResourceId'], + "physical_resource_id": res.get('PhysicalResourceId', ''), + "resource_type": res['ResourceType'], + "last_updated_time": res['LastUpdatedTimestamp'], + "status": res['ResourceStatus'], + "status_reason": res.get('ResourceStatusReason') # can be blank, apparently + }) + result['stack_resources'] = stack_resources + + elif state == 'absent': + # absent state is different because of the way delete_stack works. + # problem is it it doesn't give an error if stack isn't found + # so must describe the stack first + + try: + stack = get_stack_facts(cfn, stack_params['StackName']) + if not stack: + result = {'changed': False, 'output': 'Stack not found.'} + else: + if stack_params.get('RoleARN') is None: + cfn.delete_stack(StackName=stack_params['StackName']) + else: + cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN']) + result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'), + stack_params.get('ClientRequestToken', None)) + except Exception as err: + module.fail_json(msg=boto_exception(err), exception=traceback.format_exc()) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/cloudformation_info.py b/test/support/integration/plugins/modules/cloudformation_info.py new file mode 100644 index 00000000..ee2e5c17 --- /dev/null +++ b/test/support/integration/plugins/modules/cloudformation_info.py @@ -0,0 +1,355 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cloudformation_info +short_description: Obtain information about an AWS CloudFormation stack +description: + - Gets information about an AWS CloudFormation stack. + - This module was called C(cloudformation_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(cloudformation_info) module no longer returns C(ansible_facts)! +requirements: + - boto3 >= 1.0.0 + - python >= 2.6 +version_added: "2.2" +author: + - Justin Menga (@jmenga) + - Kevin Coming (@waffie1) +options: + stack_name: + description: + - The name or id of the CloudFormation stack. Gathers information on all stacks by default. + type: str + all_facts: + description: + - Get all stack information for the stack. + type: bool + default: false + stack_events: + description: + - Get stack events for the stack. + type: bool + default: false + stack_template: + description: + - Get stack template body for the stack. + type: bool + default: false + stack_resources: + description: + - Get stack resources for the stack. + type: bool + default: false + stack_policy: + description: + - Get stack policy for the stack. 
+ type: bool + default: false + stack_change_sets: + description: + - Get stack change sets for the stack + type: bool + default: false + version_added: '2.10' +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Get summary information about a stack +- cloudformation_info: + stack_name: my-cloudformation-stack + register: output + +- debug: + msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}" + +# When the module is called as cloudformation_facts, return values are published +# in ansible_facts['cloudformation'][<stack_name>] and can be used as follows. +# Note that this is deprecated and will stop working in Ansible 2.13. + +- cloudformation_facts: + stack_name: my-cloudformation-stack + +- debug: + msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}" + +# Get stack outputs, when you have the stack name available as a fact +- set_fact: + stack_name: my-awesome-stack + +- cloudformation_info: + stack_name: "{{ stack_name }}" + register: my_stack + +- debug: + msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}" + +# Get all stack information about a stack +- cloudformation_info: + stack_name: my-cloudformation-stack + all_facts: true + +# Get stack resource and stack policy information about a stack +- cloudformation_info: + stack_name: my-cloudformation-stack + stack_resources: true + stack_policy: true + +# Fail if the stack doesn't exist +- name: try to get facts about a stack but fail if it doesn't exist + cloudformation_info: + stack_name: nonexistent-stack + all_facts: yes + failed_when: cloudformation['nonexistent-stack'] is undefined +''' + +RETURN = ''' +stack_description: + description: Summary facts about the stack + returned: if the stack exists + type: dict +stack_outputs: + description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each + output 'OutputValue' parameter + returned: if the stack exists + type: dict + sample: + ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com +stack_parameters: + description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of + each parameter 'ParameterValue' parameter + returned: if the stack exists + type: dict + sample: + DatabaseEngine: mysql + DatabasePassword: "***" +stack_events: + description: All stack events for the stack + returned: only if all_facts or stack_events is true and the stack exists + type: list +stack_policy: + description: Describes the stack policy for the stack + returned: only if all_facts or stack_policy is true and the stack exists + type: dict +stack_template: + description: Describes the stack template for the stack + returned: only if all_facts or stack_template is true and the stack exists + type: dict +stack_resource_list: + description: Describes stack resources for the stack + returned: only if all_facts or stack_resourses is true and the stack exists + type: list +stack_resources: + description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each + resource 'PhysicalResourceId' parameter + returned: only if all_facts or stack_resourses is true and the stack exists + type: dict + sample: + AutoScalingGroup: "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7" + AutoScalingSecurityGroup: "sg-abcd1234" + ApplicationDatabase: 
"dazvlpr01xj55a" +stack_change_sets: + description: A list of stack change sets. Each item in the list represents the details of a specific changeset + + returned: only if all_facts or stack_change_sets is true and the stack exists + type: list +''' + +import json +import traceback + +from functools import partial +from ansible.module_utils._text import to_native +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry, boto3_tag_list_to_ansible_dict) + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + + +class CloudFormationServiceManager: + """Handles CloudFormation Services""" + + def __init__(self, module): + self.module = module + self.client = module.client('cloudformation') + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stacks_with_backoff(self, **kwargs): + paginator = self.client.get_paginator('describe_stacks') + return paginator.paginate(**kwargs).build_full_result()['Stacks'] + + def describe_stacks(self, stack_name=None): + try: + kwargs = {'StackName': stack_name} if stack_name else {} + response = self.describe_stacks_with_backoff(**kwargs) + if response is not None: + return response + self.module.fail_json(msg="Error describing stack(s) - an empty response was returned") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + if 'does not exist' in e.response['Error']['Message']: + # missing stack, don't bail. + return {} + self.module.fail_json_aws(e, msg="Error describing stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def list_stack_resources_with_backoff(self, stack_name): + paginator = self.client.get_paginator('list_stack_resources') + return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries'] + + def list_stack_resources(self, stack_name): + try: + return self.list_stack_resources_with_backoff(stack_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stack_events_with_backoff(self, stack_name): + paginator = self.client.get_paginator('describe_stack_events') + return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents'] + + def describe_stack_events(self, stack_name): + try: + return self.describe_stack_events_with_backoff(stack_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def list_stack_change_sets_with_backoff(self, stack_name): + paginator = self.client.get_paginator('list_change_sets') + return paginator.paginate(StackName=stack_name).build_full_result()['Summaries'] + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stack_change_set_with_backoff(self, **kwargs): + paginator = self.client.get_paginator('describe_change_set') + return paginator.paginate(**kwargs).build_full_result() + + def describe_stack_change_sets(self, stack_name): + changes = [] + try: + change_sets = self.list_stack_change_sets_with_backoff(stack_name) + for item in change_sets: + changes.append(self.describe_stack_change_set_with_backoff( + StackName=stack_name, + ChangeSetName=item['ChangeSetName'])) + return changes + except 
(botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def get_stack_policy_with_backoff(self, stack_name): + return self.client.get_stack_policy(StackName=stack_name) + + def get_stack_policy(self, stack_name): + try: + response = self.get_stack_policy_with_backoff(stack_name) + stack_policy = response.get('StackPolicyBody') + if stack_policy: + return json.loads(stack_policy) + return dict() + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def get_template_with_backoff(self, stack_name): + return self.client.get_template(StackName=stack_name) + + def get_template(self, stack_name): + try: + response = self.get_template_with_backoff(stack_name) + return response.get('TemplateBody') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name) + + +def to_dict(items, key, value): + ''' Transforms a list of items to a Key/Value dictionary ''' + if items: + return dict(zip([i.get(key) for i in items], [i.get(value) for i in items])) + else: + return dict() + + +def main(): + argument_spec = dict( + stack_name=dict(), + all_facts=dict(required=False, default=False, type='bool'), + stack_policy=dict(required=False, default=False, type='bool'), + stack_events=dict(required=False, default=False, type='bool'), + stack_resources=dict(required=False, default=False, type='bool'), + stack_template=dict(required=False, default=False, type='bool'), + stack_change_sets=dict(required=False, default=False, type='bool'), + ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + is_old_facts = module._name == 'cloudformation_facts' + if is_old_facts: + module.deprecate("The 'cloudformation_facts' module has been renamed to 'cloudformation_info', " + "and the renamed one no longer returns ansible_facts", + version='2.13', collection_name='ansible.builtin') + + service_mgr = CloudFormationServiceManager(module) + + if is_old_facts: + result = {'ansible_facts': {'cloudformation': {}}} + else: + result = {'cloudformation': {}} + + for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')): + facts = {'stack_description': stack_description} + stack_name = stack_description.get('StackName') + + # Create stack output and stack parameter dictionaries + if facts['stack_description']: + facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue') + facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), + 'ParameterKey', 'ParameterValue') + facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags')) + + # Create optional stack outputs + all_facts = module.params.get('all_facts') + if all_facts or module.params.get('stack_resources'): + facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name) + facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), + 'LogicalResourceId', 'PhysicalResourceId') + if all_facts or module.params.get('stack_template'): + facts['stack_template'] = service_mgr.get_template(stack_name) + if all_facts or module.params.get('stack_policy'): + 
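+ # NOTE: get_stack_policy() above returns the parsed JSON policy document, or an empty dict when no policy is set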
facts['stack_policy'] = service_mgr.get_stack_policy(stack_name) + if all_facts or module.params.get('stack_events'): + facts['stack_events'] = service_mgr.describe_stack_events(stack_name) + if all_facts or module.params.get('stack_change_sets'): + facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name) + + if is_old_facts: + result['ansible_facts']['cloudformation'][stack_name] = facts + else: + result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs', + 'stack_parameters', + 'stack_policy', + 'stack_resources', + 'stack_tags', + 'stack_template')) + + module.exit_json(changed=False, **result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/deploy_helper.py b/test/support/integration/plugins/modules/deploy_helper.py new file mode 100644 index 00000000..38594dde --- /dev/null +++ b/test/support/integration/plugins/modules/deploy_helper.py @@ -0,0 +1,521 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl> +# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: deploy_helper +version_added: "2.0" +author: "Ramon de la Fuente (@ramondelafuente)" +short_description: Manages some of the steps common in deploying projects. +description: + - The Deploy Helper manages some of the steps common in deploying software. + It creates a folder structure, manages a symlink for the current release + and cleans up old releases. + - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact. + C(project_path), whatever you set in the path parameter, + C(current_path), the path to the symlink that points to the active release, + C(releases_path), the path to the folder to keep releases in, + C(shared_path), the path to the folder to keep shared resources in, + C(unfinished_filename), the file to check for to recognize unfinished builds, + C(previous_release), the release the 'current' symlink is pointing to, + C(previous_release_path), the full path to the 'current' symlink target, + C(new_release), either the 'release' parameter or a generated timestamp, + C(new_release_path), the path to the new release folder (not created by the module)." + +options: + path: + required: True + aliases: ['dest'] + description: + - the root path of the project. Alias I(dest). + Returned in the C(deploy_helper.project_path) fact. + + state: + description: + - the state of the project. + C(query) will only gather facts, + C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders, + C(finalize) will remove the unfinished_filename file, create a symlink to the newly + deployed release and optionally clean old releases, + C(clean) will remove failed & old releases, + C(absent) will remove the project folder (synonymous to the M(file) module with C(state=absent)) + choices: [ present, finalize, absent, clean, query ] + default: present + + release: + description: + - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359'). 
+ This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize). + You can use the generated fact C(release={{ deploy_helper.new_release }}). + + releases_path: + description: + - the name of the folder that will hold the releases. This can be relative to C(path) or absolute. + Returned in the C(deploy_helper.releases_path) fact. + default: releases + + shared_path: + description: + - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute. + If this is set to an empty string, no shared folder will be created. + Returned in the C(deploy_helper.shared_path) fact. + default: shared + + current_path: + description: + - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean). + Returned in the C(deploy_helper.current_path) fact. + default: current + + unfinished_filename: + description: + - the name of the file that indicates a deploy has not finished. All folders in the releases_path that + contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is + automatically deleted from the I(new_release_path) during C(state=finalize). + default: DEPLOY_UNFINISHED + + clean: + description: + - Whether to run the clean procedure in case of C(state=finalize). + type: bool + default: 'yes' + + keep_releases: + description: + - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds + will be deleted first, so only correct releases will count. The current version will not count. + default: 5 + +notes: + - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden + parameters to both calls, otherwise the second call will overwrite the facts of the first one. + - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a + new naming strategy without problems. + - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent + unless you pass your own release name with C(release). Due to the nature of deploying software, this should not + be much of a problem. +''' + +EXAMPLES = ''' + +# General explanation, starting with an example folder structure for a project: + +# root: +# releases: +# - 20140415234508 +# - 20140415235146 +# - 20140416082818 +# +# shared: +# - sessions +# - uploads +# +# current: releases/20140416082818 + + +# The 'releases' folder holds all the available releases. A release is a complete build of the application being +# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem. +# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like +# git tags or commit hashes. +# +# During a deploy, a new folder should be created in the releases folder and any build steps required should be +# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink +# with a link to this build. +# +# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server +# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release +# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps. 
+# +# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress. +# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new +# release is reduced to the time it takes to switch the link. +# +# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release +# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated +# procedure to remove it during cleanup. + + +# Typical usage +- name: Initialize the deploy root and gather facts + deploy_helper: + path: /path/to/root +- name: Clone the project to the new release folder + git: + repo: git://foosball.example.org/path/to/repo.git + dest: '{{ deploy_helper.new_release_path }}' + version: v1.1.1 +- name: Add an unfinished file, to allow cleanup on successful finalize + file: + path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}' + state: touch +- name: Perform some build steps, like running your dependency manager for example + composer: + command: install + working_dir: '{{ deploy_helper.new_release_path }}' +- name: Create some folders in the shared folder + file: + path: '{{ deploy_helper.shared_path }}/{{ item }}' + state: directory + with_items: + - sessions + - uploads +- name: Add symlinks from the new release to the shared folder + file: + path: '{{ deploy_helper.new_release_path }}/{{ item.path }}' + src: '{{ deploy_helper.shared_path }}/{{ item.src }}' + state: link + with_items: + - path: app/sessions + src: sessions + - path: web/uploads + src: uploads +- name: Finalize the deploy, removing the unfinished file and switching the symlink + deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Retrieving facts before running a deploy +- name: Run 'state=query' to gather facts without changing anything + deploy_helper: + path: /path/to/root + state: query +# Remember to set the 'release' parameter when you actually call 'state=present' later +- name: Initialize the deploy root + deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: present + +# all paths can be absolute or relative (to the 'path' parameter) +- deploy_helper: + path: /path/to/root + releases_path: /var/www/project/releases + shared_path: /var/www/shared + current_path: /var/www/active + +# Using your own naming strategy for releases (a version tag in this case): +- deploy_helper: + path: /path/to/root + release: v1.1.1 + state: present +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Using a different unfinished_filename: +- deploy_helper: + path: /path/to/root + unfinished_filename: README.md + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Postponing the cleanup of older builds: +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + clean: False +- deploy_helper: + path: /path/to/root + state: clean +# Or running the cleanup ahead of the new deploy +- deploy_helper: + path: /path/to/root + state: clean +- deploy_helper: + path: /path/to/root + state: present + +# Keeping more old releases: +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + keep_releases: 10 +# Or, if you use 'clean=false' on finalize: +- deploy_helper: + path: /path/to/root + state: clean + keep_releases: 
10 + +# Removing the entire project root folder +- deploy_helper: + path: /path/to/root + state: absent + +# Debugging the facts returned by the module +- deploy_helper: + path: /path/to/root +- debug: + var: deploy_helper +''' +import os +import shutil +import time +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +class DeployHelper(object): + + def __init__(self, module): + self.module = module + self.file_args = module.load_file_common_arguments(module.params) + + self.clean = module.params['clean'] + self.current_path = module.params['current_path'] + self.keep_releases = module.params['keep_releases'] + self.path = module.params['path'] + self.release = module.params['release'] + self.releases_path = module.params['releases_path'] + self.shared_path = module.params['shared_path'] + self.state = module.params['state'] + self.unfinished_filename = module.params['unfinished_filename'] + + def gather_facts(self): + current_path = os.path.join(self.path, self.current_path) + releases_path = os.path.join(self.path, self.releases_path) + if self.shared_path: + shared_path = os.path.join(self.path, self.shared_path) + else: + shared_path = None + + previous_release, previous_release_path = self._get_last_release(current_path) + + if not self.release and (self.state == 'query' or self.state == 'present'): + self.release = time.strftime("%Y%m%d%H%M%S") + + if self.release: + new_release_path = os.path.join(releases_path, self.release) + else: + new_release_path = None + + return { + 'project_path': self.path, + 'current_path': current_path, + 'releases_path': releases_path, + 'shared_path': shared_path, + 'previous_release': previous_release, + 'previous_release_path': previous_release_path, + 'new_release': self.release, + 'new_release_path': new_release_path, + 'unfinished_filename': self.unfinished_filename + } + + def delete_path(self, path): + if not os.path.lexists(path): + return False + + if not os.path.isdir(path): + self.module.fail_json(msg="%s exists but is not a directory" % path) + + if not self.module.check_mode: + try: + shutil.rmtree(path, ignore_errors=False) + except Exception as e: + self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc()) + + return True + + def create_path(self, path): + changed = False + + if not os.path.lexists(path): + changed = True + if not self.module.check_mode: + os.makedirs(path) + + elif not os.path.isdir(path): + self.module.fail_json(msg="%s exists but is not a directory" % path) + + changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed) + + return changed + + def check_link(self, path): + if os.path.lexists(path): + if not os.path.islink(path): + self.module.fail_json(msg="%s exists but is not a symbolic link" % path) + + def create_link(self, source, link_name): + changed = False + + if os.path.islink(link_name): + norm_link = os.path.normpath(os.path.realpath(link_name)) + norm_source = os.path.normpath(os.path.realpath(source)) + if norm_link == norm_source: + changed = False + else: + changed = True + if not self.module.check_mode: + if not os.path.lexists(source): + self.module.fail_json(msg="the symlink target %s doesn't exist" % source) + tmp_link_name = link_name + '.'
+ self.unfinished_filename + if os.path.islink(tmp_link_name): + os.unlink(tmp_link_name) + os.symlink(source, tmp_link_name) + os.rename(tmp_link_name, link_name) + else: + changed = True + if not self.module.check_mode: + os.symlink(source, link_name) + + return changed + + def remove_unfinished_file(self, new_release_path): + changed = False + unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename) + if os.path.lexists(unfinished_file_path): + changed = True + if not self.module.check_mode: + os.remove(unfinished_file_path) + + return changed + + def remove_unfinished_builds(self, releases_path): + changes = 0 + + for release in os.listdir(releases_path): + if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)): + if self.module.check_mode: + changes += 1 + else: + changes += self.delete_path(os.path.join(releases_path, release)) + + return changes + + def remove_unfinished_link(self, path): + changed = False + + tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename) + if not self.module.check_mode and os.path.exists(tmp_link_name): + changed = True + os.remove(tmp_link_name) + + return changed + + def cleanup(self, releases_path, reserve_version): + changes = 0 + + if os.path.lexists(releases_path): + releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))] + try: + releases.remove(reserve_version) + except ValueError: + pass + + if not self.module.check_mode: + releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True) + for release in releases[self.keep_releases:]: + changes += self.delete_path(os.path.join(releases_path, release)) + elif len(releases) > self.keep_releases: + changes += (len(releases) - self.keep_releases) + + return changes + + def _get_file_args(self, path): + file_args = self.file_args.copy() + file_args['path'] = path + return file_args + + def _get_last_release(self, current_path): + previous_release = None + previous_release_path = None + + if os.path.lexists(current_path): + previous_release_path = os.path.realpath(current_path) + previous_release = os.path.basename(previous_release_path) + + return previous_release, previous_release_path + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + path=dict(aliases=['dest'], required=True, type='path'), + release=dict(required=False, type='str', default=None), + releases_path=dict(required=False, type='str', default='releases'), + shared_path=dict(required=False, type='path', default='shared'), + current_path=dict(required=False, type='path', default='current'), + keep_releases=dict(required=False, type='int', default=5), + clean=dict(required=False, type='bool', default=True), + unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'), + state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present') + ), + add_file_common_args=True, + supports_check_mode=True + ) + + deploy_helper = DeployHelper(module) + facts = deploy_helper.gather_facts() + + result = { + 'state': deploy_helper.state + } + + changes = 0 + + if deploy_helper.state == 'query': + result['ansible_facts'] = {'deploy_helper': facts} + + elif deploy_helper.state == 'present': + deploy_helper.check_link(facts['current_path']) + changes += deploy_helper.create_path(facts['project_path']) + changes += deploy_helper.create_path(facts['releases_path']) + if deploy_helper.shared_path: + changes += 
deploy_helper.create_path(facts['shared_path']) + + result['ansible_facts'] = {'deploy_helper': facts} + + elif deploy_helper.state == 'finalize': + if not deploy_helper.release: + module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)") + if deploy_helper.keep_releases <= 0: + module.fail_json(msg="'keep_releases' should be at least 1") + + changes += deploy_helper.remove_unfinished_file(facts['new_release_path']) + changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path']) + if deploy_helper.clean: + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'clean': + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'absent': + # destroy the facts + result['ansible_facts'] = {'deploy_helper': []} + changes += deploy_helper.delete_path(facts['project_path']) + + if changes > 0: + result['changed'] = True + else: + result['changed'] = False + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/docker_swarm.py b/test/support/integration/plugins/modules/docker_swarm.py new file mode 100644 index 00000000..a2c076c5 --- /dev/null +++ b/test/support/integration/plugins/modules/docker_swarm.py @@ -0,0 +1,681 @@ +#!/usr/bin/python + +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: docker_swarm +short_description: Manage Swarm cluster +version_added: "2.7" +description: + - Create a new Swarm cluster. + - Add/Remove nodes or managers to an existing cluster. +options: + advertise_addr: + description: + - Externally reachable address advertised to other nodes. + - This can either be an address/port combination + in the form C(192.168.1.1:4567), or an interface followed by a + port number, like C(eth0:4567). + - If the port number is omitted, + the port number from the listen address is used. + - If I(advertise_addr) is not specified, it will be automatically + detected when possible. + - Only used when swarm is initialised or joined. Because of this it's not + considered for idempotency checking. + type: str + default_addr_pool: + description: + - Default address pool in CIDR format. + - Only used when swarm is initialised. Because of this it's not considered + for idempotency checking. + - Requires API version >= 1.39. + type: list + elements: str + version_added: "2.8" + subnet_size: + description: + - Default address pool subnet mask length. + - Only used when swarm is initialised. Because of this it's not considered + for idempotency checking. + - Requires API version >= 1.39. + type: int + version_added: "2.8" + listen_addr: + description: + - Listen address used for inter-manager communication. 
+ - This can either be an address/port combination in the form + C(192.168.1.1:4567), or an interface followed by a port number, + like C(eth0:4567). + - If the port number is omitted, the default swarm listening port + is used. + - Only used when swarm is initialised or joined. Because of this it's not + considered for idempotency checking. + type: str + default: 0.0.0.0:2377 + force: + description: + - Use with state C(present) to force creating a new Swarm, even if already part of one. + - Use with state C(absent) to leave the swarm even if this node is a manager. + type: bool + default: no + state: + description: + - Set to C(present), to create/update a new cluster. + - Set to C(join), to join an existing cluster. + - Set to C(absent), to leave an existing cluster. + - Set to C(remove), to remove an absent node from the cluster. + Note that removing requires Docker SDK for Python >= 2.4.0. + - Set to C(inspect) to display swarm information. + type: str + default: present + choices: + - present + - join + - absent + - remove + - inspect + node_id: + description: + - Swarm id of the node to remove. + - Used with I(state=remove). + type: str + join_token: + description: + - Swarm token used to join a swarm cluster. + - Used with I(state=join). + type: str + remote_addrs: + description: + - Remote address of one or more manager nodes of an existing Swarm to connect to. + - Used with I(state=join). + type: list + elements: str + task_history_retention_limit: + description: + - Maximum number of task history entries stored. + - Docker default value is C(5). + type: int + snapshot_interval: + description: + - Number of log entries between snapshots. + - Docker default value is C(10000). + type: int + keep_old_snapshots: + description: + - Number of snapshots to keep beyond the current snapshot. + - Docker default value is C(0). + type: int + log_entries_for_slow_followers: + description: + - Number of log entries to keep around to sync up slow followers after a snapshot is created. + type: int + heartbeat_tick: + description: + - Number of ticks (in seconds) between each heartbeat. + - Docker default value is C(1s). + type: int + election_tick: + description: + - Number of ticks (in seconds) needed without a leader to trigger a new election. + - Docker default value is C(10s). + type: int + dispatcher_heartbeat_period: + description: + - The delay for an agent to send a heartbeat to the dispatcher. + - Docker default value is C(5s). + type: int + node_cert_expiry: + description: + - Automatic expiry for node certificates. + - Docker default value is C(3months). + type: int + name: + description: + - The name of the swarm. + type: str + labels: + description: + - User-defined key/value metadata. + - Label operations in this module apply to the docker swarm cluster. + Use the M(docker_node) module to add/modify/remove swarm node labels. + - Requires API version >= 1.32. + type: dict + signing_ca_cert: + description: + - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format. + - This must not be a path to a certificate, but the contents of the certificate. + - Requires API version >= 1.30. + type: str + signing_ca_key: + description: + - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format. + - This must not be a path to a key, but the contents of the key.
+ type: str + ca_force_rotate: + description: + - An integer whose purpose is to force swarm to generate a new signing CA certificate and key, + if none have been specified. + - Docker default value is C(0). + - Requires API version >= 1.30. + type: int + autolock_managers: + description: + - If set, generate a key and use it to lock data stored on the managers. + - Docker default value is C(no). + - M(docker_swarm_info) can be used to retrieve the unlock key. + type: bool + rotate_worker_token: + description: Rotate the worker join token. + type: bool + default: no + rotate_manager_token: + description: Rotate the manager join token. + type: bool + default: no +extends_documentation_fragment: + - docker + - docker.docker_py_1_documentation +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - Docker API >= 1.25 +author: + - Thierry Bouvet (@tbouvet) + - Piotr Wojciechowski (@WojciechowskiPiotr) +''' + +EXAMPLES = ''' + +- name: Init a new swarm with default parameters + docker_swarm: + state: present + +- name: Update swarm configuration + docker_swarm: + state: present + election_tick: 5 + +- name: Add nodes + docker_swarm: + state: join + advertise_addr: 192.168.1.2 + join_token: SWMTKN-1--xxxxx + remote_addrs: [ '192.168.1.1:2377' ] + +- name: Leave swarm for a node + docker_swarm: + state: absent + +- name: Remove a swarm manager + docker_swarm: + state: absent + force: true + +- name: Remove node from swarm + docker_swarm: + state: remove + node_id: mynode + +- name: Inspect swarm + docker_swarm: + state: inspect + register: swarm_info +''' + +RETURN = ''' +swarm_facts: + description: Information about the swarm. + returned: success + type: dict + contains: + JoinTokens: + description: Tokens to connect to the Swarm. + returned: success + type: dict + contains: + Worker: + description: Token to create a new *worker* node. + returned: success + type: str + example: SWMTKN-1--xxxxx + Manager: + description: Token to create a new *manager* node. + returned: success + type: str + example: SWMTKN-1--xxxxx + UnlockKey: + description: The swarm unlock-key if I(autolock_managers) is C(true). + returned: on success if I(autolock_managers) is C(true) + and swarm is initialised, or if I(autolock_managers) has changed. + type: str + example: SWMKEY-1-xxx + +actions: + description: Provides the actions done on the swarm. + returned: when action failed.
+ type: list + elements: str + example: "['This cluster is already a swarm cluster']" + +''' + +import json +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible.module_utils.docker.common import ( + DockerBaseClass, + DifferenceTracker, + RequestException, +) + +from ansible.module_utils.docker.swarm import AnsibleDockerSwarmClient + +from ansible.module_utils._text import to_native + + +class TaskParameters(DockerBaseClass): + def __init__(self): + super(TaskParameters, self).__init__() + + self.advertise_addr = None + self.listen_addr = None + self.remote_addrs = None + self.join_token = None + + # Spec + self.snapshot_interval = None + self.task_history_retention_limit = None + self.keep_old_snapshots = None + self.log_entries_for_slow_followers = None + self.heartbeat_tick = None + self.election_tick = None + self.dispatcher_heartbeat_period = None + self.node_cert_expiry = None + self.name = None + self.labels = None + self.log_driver = None + self.signing_ca_cert = None + self.signing_ca_key = None + self.ca_force_rotate = None + self.autolock_managers = None + self.rotate_worker_token = None + self.rotate_manager_token = None + self.default_addr_pool = None + self.subnet_size = None + + @staticmethod + def from_ansible_params(client): + result = TaskParameters() + for key, value in client.module.params.items(): + if key in result.__dict__: + setattr(result, key, value) + + result.update_parameters(client) + return result + + def update_from_swarm_info(self, swarm_info): + spec = swarm_info['Spec'] + + ca_config = spec.get('CAConfig') or dict() + if self.node_cert_expiry is None: + self.node_cert_expiry = ca_config.get('NodeCertExpiry') + if self.ca_force_rotate is None: + self.ca_force_rotate = ca_config.get('ForceRotate') + + dispatcher = spec.get('Dispatcher') or dict() + if self.dispatcher_heartbeat_period is None: + self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod') + + raft = spec.get('Raft') or dict() + if self.snapshot_interval is None: + self.snapshot_interval = raft.get('SnapshotInterval') + if self.keep_old_snapshots is None: + self.keep_old_snapshots = raft.get('KeepOldSnapshots') + if self.heartbeat_tick is None: + self.heartbeat_tick = raft.get('HeartbeatTick') + if self.log_entries_for_slow_followers is None: + self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers') + if self.election_tick is None: + self.election_tick = raft.get('ElectionTick') + + orchestration = spec.get('Orchestration') or dict() + if self.task_history_retention_limit is None: + self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit') + + encryption_config = spec.get('EncryptionConfig') or dict() + if self.autolock_managers is None: + self.autolock_managers = encryption_config.get('AutoLockManagers') + + if self.name is None: + self.name = spec['Name'] + + if self.labels is None: + self.labels = spec.get('Labels') or {} + + if 'LogDriver' in spec['TaskDefaults']: + self.log_driver = spec['TaskDefaults']['LogDriver'] + + def update_parameters(self, client): + assign = dict( + snapshot_interval='snapshot_interval', + task_history_retention_limit='task_history_retention_limit', + keep_old_snapshots='keep_old_snapshots', + log_entries_for_slow_followers='log_entries_for_slow_followers', + heartbeat_tick='heartbeat_tick', + election_tick='election_tick', + 
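+ # (each entry below pairs a swarm spec kwarg with the module parameter of the same name; entries whose minimal SDK/API version is not met are skipped in the loop that follows)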
dispatcher_heartbeat_period='dispatcher_heartbeat_period', + node_cert_expiry='node_cert_expiry', + name='name', + labels='labels', + signing_ca_cert='signing_ca_cert', + signing_ca_key='signing_ca_key', + ca_force_rotate='ca_force_rotate', + autolock_managers='autolock_managers', + log_driver='log_driver', + ) + params = dict() + for dest, source in assign.items(): + if not client.option_minimal_versions[source]['supported']: + continue + value = getattr(self, source) + if value is not None: + params[dest] = value + self.spec = client.create_swarm_spec(**params) + + def compare_to_active(self, other, client, differences): + for k in self.__dict__: + if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token', + 'rotate_worker_token', 'rotate_manager_token', 'spec', + 'default_addr_pool', 'subnet_size'): + continue + if not client.option_minimal_versions[k]['supported']: + continue + value = getattr(self, k) + if value is None: + continue + other_value = getattr(other, k) + if value != other_value: + differences.add(k, parameter=value, active=other_value) + if self.rotate_worker_token: + differences.add('rotate_worker_token', parameter=True, active=False) + if self.rotate_manager_token: + differences.add('rotate_manager_token', parameter=True, active=False) + return differences + + +class SwarmManager(DockerBaseClass): + + def __init__(self, client, results): + + super(SwarmManager, self).__init__() + + self.client = client + self.results = results + self.check_mode = self.client.check_mode + self.swarm_info = {} + + self.state = client.module.params['state'] + self.force = client.module.params['force'] + self.node_id = client.module.params['node_id'] + + self.differences = DifferenceTracker() + self.parameters = TaskParameters.from_ansible_params(client) + + self.created = False + + def __call__(self): + choice_map = { + "present": self.init_swarm, + "join": self.join, + "absent": self.leave, + "remove": self.remove, + "inspect": self.inspect_swarm + } + + if self.state == 'inspect': + self.client.module.deprecate( + "The 'inspect' state is deprecated, please use 'docker_swarm_info' to inspect swarm cluster", + version='2.12', collection_name='ansible.builtin') + + choice_map.get(self.state)() + + if self.client.module._diff or self.parameters.debug: + diff = dict() + diff['before'], diff['after'] = self.differences.get_before_after() + self.results['diff'] = diff + + def inspect_swarm(self): + try: + data = self.client.inspect_swarm() + json_str = json.dumps(data, ensure_ascii=False) + self.swarm_info = json.loads(json_str) + + self.results['changed'] = False + self.results['swarm_facts'] = self.swarm_info + + unlock_key = self.get_unlock_key() + self.swarm_info.update(unlock_key) + except APIError: + return + + def get_unlock_key(self): + default = {'UnlockKey': None} + if not self.has_swarm_lock_changed(): + return default + try: + return self.client.get_unlock_key() or default + except APIError: + return default + + def has_swarm_lock_changed(self): + return self.parameters.autolock_managers and ( + self.created or self.differences.has_difference_for('autolock_managers') + ) + + def init_swarm(self): + if not self.force and self.client.check_if_swarm_manager(): + self.__update_swarm() + return + + if not self.check_mode: + init_arguments = { + 'advertise_addr': self.parameters.advertise_addr, + 'listen_addr': self.parameters.listen_addr, + 'force_new_cluster': self.force, + 'swarm_spec': self.parameters.spec, + } + if self.parameters.default_addr_pool is not None: + 
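+ # default_addr_pool and subnet_size only apply when the swarm is initialised (Docker API >= 1.39), so they are passed to init_swarm() here rather than carried in the reusable swarm spec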
init_arguments['default_addr_pool'] = self.parameters.default_addr_pool + if self.parameters.subnet_size is not None: + init_arguments['subnet_size'] = self.parameters.subnet_size + try: + self.client.init_swarm(**init_arguments) + except APIError as exc: + self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc)) + + if not self.client.check_if_swarm_manager(): + if not self.check_mode: + self.client.fail("Swarm not created or other error!") + + self.created = True + self.inspect_swarm() + self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID'))) + self.differences.add('state', parameter='present', active='absent') + self.results['changed'] = True + self.results['swarm_facts'] = { + 'JoinTokens': self.swarm_info.get('JoinTokens'), + 'UnlockKey': self.swarm_info.get('UnlockKey') + } + + def __update_swarm(self): + try: + self.inspect_swarm() + version = self.swarm_info['Version']['Index'] + self.parameters.update_from_swarm_info(self.swarm_info) + old_parameters = TaskParameters() + old_parameters.update_from_swarm_info(self.swarm_info) + self.parameters.compare_to_active(old_parameters, self.client, self.differences) + if self.differences.empty: + self.results['actions'].append("No modification") + self.results['changed'] = False + return + update_parameters = TaskParameters.from_ansible_params(self.client) + update_parameters.update_parameters(self.client) + if not self.check_mode: + self.client.update_swarm( + version=version, swarm_spec=update_parameters.spec, + rotate_worker_token=self.parameters.rotate_worker_token, + rotate_manager_token=self.parameters.rotate_manager_token) + except APIError as exc: + self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc)) + return + + self.inspect_swarm() + self.results['actions'].append("Swarm cluster updated") + self.results['changed'] = True + + def join(self): + if self.client.check_if_swarm_node(): + self.results['actions'].append("This node is already part of a swarm.") + return + if not self.check_mode: + try: + self.client.join_swarm( + remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token, + listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr) + except APIError as exc: + self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc)) + self.results['actions'].append("New node is added to swarm cluster") + self.differences.add('joined', parameter=True, active=False) + self.results['changed'] = True + + def leave(self): + if not self.client.check_if_swarm_node(): + self.results['actions'].append("This node is not part of a swarm.") + return + if not self.check_mode: + try: + self.client.leave_swarm(force=self.force) + except APIError as exc: + self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc)) + self.results['actions'].append("Node has left the swarm cluster") + self.differences.add('joined', parameter='absent', active='present') + self.results['changed'] = True + + def remove(self): + if not self.client.check_if_swarm_manager(): + self.client.fail("This node is not a manager.") + + try: + status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5) + except APIError: + return + + if not status_down: + self.client.fail("Can not remove the node. 
The node status is ready, not down.") + + if not self.check_mode: + try: + self.client.remove_node(node_id=self.node_id, force=self.force) + except APIError as exc: + self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc)) + self.results['actions'].append("Node is removed from swarm cluster.") + self.differences.add('joined', parameter=False, active=True) + self.results['changed'] = True + + +def _detect_remove_operation(client): + return client.module.params['state'] == 'remove' + + +def main(): + argument_spec = dict( + advertise_addr=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']), + force=dict(type='bool', default=False), + listen_addr=dict(type='str', default='0.0.0.0:2377'), + remote_addrs=dict(type='list', elements='str'), + join_token=dict(type='str'), + snapshot_interval=dict(type='int'), + task_history_retention_limit=dict(type='int'), + keep_old_snapshots=dict(type='int'), + log_entries_for_slow_followers=dict(type='int'), + heartbeat_tick=dict(type='int'), + election_tick=dict(type='int'), + dispatcher_heartbeat_period=dict(type='int'), + node_cert_expiry=dict(type='int'), + name=dict(type='str'), + labels=dict(type='dict'), + signing_ca_cert=dict(type='str'), + signing_ca_key=dict(type='str'), + ca_force_rotate=dict(type='int'), + autolock_managers=dict(type='bool'), + node_id=dict(type='str'), + rotate_worker_token=dict(type='bool', default=False), + rotate_manager_token=dict(type='bool', default=False), + default_addr_pool=dict(type='list', elements='str'), + subnet_size=dict(type='int'), + ) + + required_if = [ + ('state', 'join', ['advertise_addr', 'remote_addrs', 'join_token']), + ('state', 'remove', ['node_id']) + ] + + option_minimal_versions = dict( + labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'), + signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + autolock_managers=dict(docker_py_version='2.6.0'), + log_driver=dict(docker_py_version='2.6.0'), + remove_operation=dict( + docker_py_version='2.4.0', + detect_usage=_detect_remove_operation, + usage_msg='remove swarm nodes' + ), + default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'), + subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'), + ) + + client = AnsibleDockerSwarmClient( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + min_docker_version='1.10.0', + min_docker_api_version='1.25', + option_minimal_versions=option_minimal_versions, + ) + + try: + results = dict( + changed=False, + result='', + actions=[] + ) + + SwarmManager(client, results)() + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/ec2.py b/test/support/integration/plugins/modules/ec2.py new file mode 100644 index 00000000..952aa5a1 --- /dev/null +++ b/test/support/integration/plugins/modules/ec2.py @@ -0,0 +1,1766 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU
General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: ec2 +short_description: create, terminate, start or stop an instance in ec2 +description: + - Creates or terminates ec2 instances. + - > + Note: This module uses the older boto Python module to interact with the EC2 API. + M(ec2) will still receive bug fixes, but no new features. + Consider using the M(ec2_instance) module instead. + If M(ec2_instance) does not support a feature you need that is available in M(ec2), please + file a feature request. +version_added: "0.9" +options: + key_name: + description: + - Key pair to use on the instance. + - The SSH key must already exist in AWS in order to use this argument. + - Keys can be created / deleted using the M(ec2_key) module. + aliases: ['keypair'] + type: str + id: + version_added: "1.1" + description: + - Identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. + - This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. + - For details, see the description of client token at U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + type: str + group: + description: + - Security group (or list of groups) to use with the instance. + aliases: [ 'groups' ] + type: list + elements: str + group_id: + version_added: "1.1" + description: + - Security group id (or list of ids) to use with the instance. + type: list + elements: str + zone: + version_added: "1.2" + description: + - AWS availability zone in which to launch the instance. + aliases: [ 'aws_zone', 'ec2_zone' ] + type: str + instance_type: + description: + - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + - Required when creating a new instance. + type: str + aliases: ['type'] + tenancy: + version_added: "1.9" + description: + - An instance with a tenancy of C(dedicated) runs on single-tenant hardware and can only be launched into a VPC. + - Note that to use dedicated tenancy you MUST specify a I(vpc_subnet_id) as well. + - Dedicated tenancy is not available for EC2 "micro" instances. + default: default + choices: [ "default", "dedicated" ] + type: str + spot_price: + version_added: "1.5" + description: + - Maximum spot price to bid. If not set, a regular on-demand instance is requested. + - A spot request is made with this maximum bid. When it is filled, the instance is started. + type: str + spot_type: + version_added: "2.0" + description: + - The type of spot request. + - After being interrupted a C(persistent) spot instance will be started once there is capacity to fill the request again. + default: "one-time" + choices: [ "one-time", "persistent" ] + type: str + image: + description: + - I(ami) ID to use for the instance. + - Required when I(state=present). + type: str + kernel: + description: + - Kernel eki to use for the instance. + type: str + ramdisk: + description: + - Ramdisk eri to use for the instance. + type: str + wait: + description: + - Wait for the instance to reach its desired state before returning. + - Does not wait for SSH, see the 'wait_for_connection' example for details. 
+ type: bool + default: false + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + type: int + spot_wait_timeout: + version_added: "1.5" + description: + - How long to wait for the spot instance request to be fulfilled. Affects 'Request valid until' for setting spot request lifespan. + default: 600 + type: int + count: + description: + - Number of instances to launch. + default: 1 + type: int + monitoring: + version_added: "1.1" + description: + - Enable detailed monitoring (CloudWatch) for instance. + type: bool + default: false + user_data: + version_added: "0.9" + description: + - Opaque blob of data which is made available to the EC2 instance. + type: str + instance_tags: + version_added: "1.0" + description: + - A hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'. + type: dict + placement_group: + version_added: "1.3" + description: + - Placement group for the instance when using EC2 Clustered Compute. + type: str + vpc_subnet_id: + version_added: "1.1" + description: + - the subnet ID in which to launch the instance (VPC). + type: str + assign_public_ip: + version_added: "1.5" + description: + - When provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+. + type: bool + private_ip: + version_added: "1.2" + description: + - The private ip address to assign the instance (from the vpc subnet). + type: str + instance_profile_name: + version_added: "1.3" + description: + - Name of the IAM instance profile (i.e. what the EC2 console refers to as an "IAM Role") to use. Boto library must be 2.5.0+. + type: str + instance_ids: + version_added: "1.3" + description: + - "list of instance ids, currently used for states: absent, running, stopped" + aliases: ['instance_id'] + type: list + elements: str + source_dest_check: + version_added: "1.6" + description: + - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers). + When initially creating an instance the EC2 API defaults this to C(True). + type: bool + termination_protection: + version_added: "2.0" + description: + - Enable or Disable the Termination Protection. + type: bool + default: false + instance_initiated_shutdown_behavior: + version_added: "2.2" + description: + - Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store + images (which require termination on shutdown). + default: 'stop' + choices: [ "stop", "terminate" ] + type: str + state: + version_added: "1.3" + description: + - Create, terminate, start, stop or restart instances. The state 'restarted' was added in Ansible 2.2. + - When I(state=absent), I(instance_ids) is required. + - When I(state=running), I(state=stopped) or I(state=restarted) then either I(instance_ids) or I(instance_tags) is required. + default: 'present' + choices: ['absent', 'present', 'restarted', 'running', 'stopped'] + type: str + volumes: + version_added: "1.5" + description: + - A list of hash/dictionaries of volumes to add to the new instance. + type: list + elements: dict + suboptions: + device_name: + type: str + required: true + description: + - A name for the device (For example C(/dev/sda)). + delete_on_termination: + type: bool + default: false + description: + - Whether the volume should be automatically deleted when the instance is terminated. + ephemeral: + type: str + description: + - Whether the volume should be ephemeral.
+ - Data on ephemeral volumes is lost when the instance is stopped. + - Mutually exclusive with the I(snapshot) parameter. + encrypted: + type: bool + default: false + description: + - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK. + snapshot: + type: str + description: + - The ID of an EBS snapshot to copy when creating the volume. + - Mutually exclusive with the I(ephemeral) parameter. + volume_type: + type: str + description: + - The type of volume to create. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types. + volume_size: + type: int + description: + - The size of the volume (in GiB). + iops: + type: int + description: + - The number of IOPS per second to provision for the volume. + - Required when I(volume_type=io1). + ebs_optimized: + version_added: "1.6" + description: + - Whether instance is using optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html). + default: false + type: bool + exact_count: + version_added: "1.5" + description: + - An integer value which indicates how many instances that match the 'count_tag' parameter should be running. + Instances are either created or terminated based on this value. + type: int + count_tag: + version_added: "1.5" + description: + - Used with I(exact_count) to determine how many nodes based on a specific tag criteria should be running. + This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers + that are tagged with "class=webserver". The specified tag must already exist or be passed in as the I(instance_tags) option. + type: raw + network_interfaces: + version_added: "2.0" + description: + - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces, + none of the I(assign_public_ip), I(private_ip), I(vpc_subnet_id), I(group), or I(group_id) parameters may be used. (Those parameters are + for creating a new network interface at launch.) + aliases: ['network_interface'] + type: list + elements: str + spot_launch_group: + version_added: "2.1" + description: + - Launch group for spot requests, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group). + type: str +author: + - "Tim Gerla (@tgerla)" + - "Lester Wade (@lwade)" + - "Seth Vidal (@skvidal)" +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
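+ +# Illustrative sketch (not one of the module's stock examples): credentials and region +# can also be passed inline through the shared 'aws'/'ec2' documentation fragment +# parameters instead of environment variables, for example: +# - ec2: +# aws_access_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}" +# aws_secret_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}" +# region: us-east-1 +# image: ami-123456 +# instance_type: t2.micro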
+ +# Basic provisioning example +- ec2: + key_name: mykey + instance_type: t2.micro + image: ami-123456 + wait: yes + group: webserver + count: 3 + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Advanced example with tagging and CloudWatch +- ec2: + key_name: mykey + group: databases + instance_type: t2.micro + image: ami-123456 + wait: yes + wait_timeout: 500 + count: 5 + instance_tags: + db: postgres + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Single instance with additional IOPS volume from snapshot and volume delete on termination +- ec2: + key_name: mykey + group: webserver + instance_type: c3.medium + image: ami-123456 + wait: yes + wait_timeout: 500 + volumes: + - device_name: /dev/sdb + snapshot: snap-abcdef12 + volume_type: io1 + iops: 1000 + volume_size: 100 + delete_on_termination: true + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Single instance with ssd gp2 root volume +- ec2: + key_name: mykey + group: webserver + instance_type: c3.medium + image: ami-123456 + wait: yes + wait_timeout: 500 + volumes: + - device_name: /dev/xvda + volume_type: gp2 + volume_size: 8 + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + count_tag: + Name: dbserver + exact_count: 1 + +# Multiple groups example +- ec2: + key_name: mykey + group: ['databases', 'internal-services', 'sshable', 'and-so-forth'] + instance_type: m1.large + image: ami-6e649707 + wait: yes + wait_timeout: 500 + count: 5 + instance_tags: + db: postgres + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Multiple instances with additional volume from snapshot +- ec2: + key_name: mykey + group: webserver + instance_type: m1.large + image: ami-6e649707 + wait: yes + wait_timeout: 500 + count: 5 + volumes: + - device_name: /dev/sdb + snapshot: snap-abcdef12 + volume_size: 10 + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Dedicated tenancy example +- local_action: + module: ec2 + assign_public_ip: yes + group_id: sg-1dc53f72 + key_name: mykey + image: ami-6e649707 + instance_type: m1.small + tenancy: dedicated + vpc_subnet_id: subnet-29e63245 + wait: yes + +# Spot instance example +- ec2: + spot_price: 0.24 + spot_wait_timeout: 600 + keypair: mykey + group_id: sg-1dc53f72 + instance_type: m1.small + image: ami-6e649707 + wait: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + spot_launch_group: report_generators + instance_initiated_shutdown_behavior: terminate + +# Examples using pre-existing network interfaces +- ec2: + key_name: mykey + instance_type: t2.small + image: ami-f005ba11 + network_interface: eni-deadbeef + +- ec2: + key_name: mykey + instance_type: t2.small + image: ami-f005ba11 + network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e'] + +# Launch instances, runs some tasks +# and then terminate them + +- name: Create a sandbox instance + hosts: localhost + gather_facts: False + vars: + keypair: my_keypair + instance_type: m1.small + security_group: my_securitygroup + image: my_ami_id + region: us-east-1 + tasks: + - name: Launch instance + ec2: + key_name: "{{ keypair }}" + group: "{{ security_group }}" + instance_type: "{{ instance_type }}" + image: "{{ image }}" + wait: true + region: "{{ region }}" + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + register: ec2 + + - name: Add new instance to host group + add_host: + hostname: "{{ item.public_ip }}" + groupname: launched + loop: "{{ ec2.instances }}" + + - name: Wait for SSH to come up + 
delegate_to: "{{ item.public_dns_name }}" + wait_for_connection: + delay: 60 + timeout: 320 + loop: "{{ ec2.instances }}" + +- name: Configure instance(s) + hosts: launched + become: True + gather_facts: True + roles: + - my_awesome_role + - my_awesome_test + +- name: Terminate instances + hosts: localhost + tasks: + - name: Terminate instances that were previously launched + ec2: + state: 'absent' + instance_ids: '{{ ec2.instance_ids }}' + +# Start a few existing instances, run some tasks +# and stop the instances + +- name: Start sandbox instances + hosts: localhost + gather_facts: false + vars: + instance_ids: + - 'i-xxxxxx' + - 'i-xxxxxx' + - 'i-xxxxxx' + region: us-east-1 + tasks: + - name: Start the sandbox instances + ec2: + instance_ids: '{{ instance_ids }}' + region: '{{ region }}' + state: running + wait: True + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + roles: + - do_neat_stuff + - do_more_neat_stuff + +- name: Stop sandbox instances + hosts: localhost + gather_facts: false + vars: + instance_ids: + - 'i-xxxxxx' + - 'i-xxxxxx' + - 'i-xxxxxx' + region: us-east-1 + tasks: + - name: Stop the sandbox instances + ec2: + instance_ids: '{{ instance_ids }}' + region: '{{ region }}' + state: stopped + wait: True + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# +# Start stopped instances specified by tag +# +- local_action: + module: ec2 + instance_tags: + Name: ExtraPower + state: running + +# +# Restart instances specified by tag +# +- local_action: + module: ec2 + instance_tags: + Name: ExtraPower + state: restarted + +# +# Enforce that 5 instances with a tag "foo" are running +# (Highly recommended!) +# + +- ec2: + key_name: mykey + instance_type: c1.medium + image: ami-40603AD1 + wait: yes + group: webserver + instance_tags: + foo: bar + exact_count: 5 + count_tag: foo + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# +# Enforce that 5 running instances named "database" with a "dbtype" of "postgres" +# + +- ec2: + key_name: mykey + instance_type: c1.medium + image: ami-40603AD1 + wait: yes + group: webserver + instance_tags: + Name: database + dbtype: postgres + exact_count: 5 + count_tag: + Name: database + dbtype: postgres + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# +# count_tag complex argument examples +# + + # instances with tag foo +- ec2: + count_tag: + foo: + + # instances with tag foo=bar +- ec2: + count_tag: + foo: bar + + # instances with tags foo=bar & baz +- ec2: + count_tag: + foo: bar + baz: + + # instances with tags foo & bar & baz=bang +- ec2: + count_tag: + - foo + - bar + - baz: bang + +''' + +import time +import datetime +import traceback +from ast import literal_eval +from distutils.version import LooseVersion + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, ec2_connect +from ansible.module_utils.six import get_function_code, string_types +from ansible.module_utils._text import to_bytes, to_text + +try: + import boto.ec2 + from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping + from boto.exception import EC2ResponseError + from boto import connect_ec2_endpoint + from boto import connect_vpc + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None): + + # get reservations for instances that match tag(s) and are in the desired state + state = module.params.get('state') + if state not in ['running', 'stopped']: + 
state = None + reservations = get_reservations(module, ec2, vpc, tags=count_tag, state=state, zone=zone) + + instances = [] + for res in reservations: + if hasattr(res, 'instances'): + for inst in res.instances: + if inst.state == 'terminated' or inst.state == 'shutting-down': + continue + instances.append(inst) + + return reservations, instances + + +def _set_none_to_blank(dictionary): + result = dictionary + for k in result: + if isinstance(result[k], dict): + result[k] = _set_none_to_blank(result[k]) + elif not result[k]: + result[k] = "" + return result + + +def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None): + # TODO: filters do not work with tags that have underscores + filters = dict() + + vpc_subnet_id = module.params.get('vpc_subnet_id') + vpc_id = None + if vpc_subnet_id: + filters.update({"subnet-id": vpc_subnet_id}) + if vpc: + vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id + + if vpc_id: + filters.update({"vpc-id": vpc_id}) + + if tags is not None: + + if isinstance(tags, str): + try: + tags = literal_eval(tags) + except Exception: + pass + + # if not a string type, convert and make sure it's a text string + if isinstance(tags, int): + tags = to_text(tags) + + # if string, we only care that a tag of that name exists + if isinstance(tags, str): + filters.update({"tag-key": tags}) + + # if list, append each item to filters + if isinstance(tags, list): + for x in tags: + if isinstance(x, dict): + x = _set_none_to_blank(x) + filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items())) + else: + filters.update({"tag-key": x}) + + # if dict, add the key and value to the filter + if isinstance(tags, dict): + tags = _set_none_to_blank(tags) + filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items())) + + # lets check to see if the filters dict is empty, if so then stop + if not filters: + module.fail_json(msg="Filters based on tag is empty => tags: %s" % (tags)) + + if state: + # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api + filters.update({'instance-state-name': state}) + + if zone: + filters.update({'availability-zone': zone}) + + if module.params.get('id'): + filters['client-token'] = module.params['id'] + + results = ec2.get_all_instances(filters=filters) + + return results + + +def get_instance_info(inst): + """ + Retrieves instance information from an instance + ID and returns it as a dictionary + """ + instance_info = {'id': inst.id, + 'ami_launch_index': inst.ami_launch_index, + 'private_ip': inst.private_ip_address, + 'private_dns_name': inst.private_dns_name, + 'public_ip': inst.ip_address, + 'dns_name': inst.dns_name, + 'public_dns_name': inst.public_dns_name, + 'state_code': inst.state_code, + 'architecture': inst.architecture, + 'image_id': inst.image_id, + 'key_name': inst.key_name, + 'placement': inst.placement, + 'region': inst.placement[:-1], + 'kernel': inst.kernel, + 'ramdisk': inst.ramdisk, + 'launch_time': inst.launch_time, + 'instance_type': inst.instance_type, + 'root_device_type': inst.root_device_type, + 'root_device_name': inst.root_device_name, + 'state': inst.state, + 'hypervisor': inst.hypervisor, + 'tags': inst.tags, + 'groups': dict((group.id, group.name) for group in inst.groups), + } + try: + instance_info['virtualization_type'] = getattr(inst, 'virtualization_type') + except AttributeError: + instance_info['virtualization_type'] = None + + try: + instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized') + except AttributeError: + 
instance_info['ebs_optimized'] = False
+
+ try:
+ bdm_dict = {}
+ bdm = getattr(inst, 'block_device_mapping')
+ for device_name in bdm.keys():
+ bdm_dict[device_name] = {
+ 'status': bdm[device_name].status,
+ 'volume_id': bdm[device_name].volume_id,
+ 'delete_on_termination': bdm[device_name].delete_on_termination
+ }
+ instance_info['block_device_mapping'] = bdm_dict
+ except AttributeError:
+ instance_info['block_device_mapping'] = False
+
+ try:
+ instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
+ except AttributeError:
+ instance_info['tenancy'] = 'default'
+
+ return instance_info
+
+
+def boto_supports_associate_public_ip_address(ec2):
+ """
+ Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification
+ class. Added in Boto 2.13.0
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if Boto library accepts associate_public_ip_address argument, else false
+ """
+
+ try:
+ network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
+ getattr(network_interface, "associate_public_ip_address")
+ return True
+ except AttributeError:
+ return False
+
+
+def boto_supports_profile_name_arg(ec2):
+ """
+ Check if Boto library has instance_profile_name argument. instance_profile_name has been added in Boto 2.5.0
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if Boto library accepts instance_profile_name argument, else false
+ """
+ run_instances_method = getattr(ec2, 'run_instances')
+ return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames
+
+
+def boto_supports_volume_encryption():
+ """
+ Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
+
+ Returns:
+ True if the installed Boto version is 2.29.0 or later, else False
+ """
+ return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
+
+
+def create_block_device(module, ec2, volume):
+ # Not aware of a way to determine this programmatically
+ # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
+ MAX_IOPS_TO_SIZE_RATIO = 30
+
+ volume_type = volume.get('volume_type')
+
+ if 'snapshot' not in volume and 'ephemeral' not in volume:
+ if 'volume_size' not in volume:
+ module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
+ if 'snapshot' in volume:
+ if volume_type == 'io1' and 'iops' not in volume:
+ module.fail_json(msg='io1 volumes must have an iops value set')
+ if 'iops' in volume:
+ snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
+ size = volume.get('volume_size', snapshot.volume_size)
+ if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
+ module.fail_json(msg='IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
+ if 'ephemeral' in volume:
+ if 'snapshot' in volume:
+ module.fail_json(msg='Cannot set both ephemeral and snapshot')
+ if boto_supports_volume_encryption():
+ return BlockDeviceType(snapshot_id=volume.get('snapshot'),
+ ephemeral_name=volume.get('ephemeral'),
+ size=volume.get('volume_size'),
+ volume_type=volume_type,
+ delete_on_termination=volume.get('delete_on_termination', False),
+ iops=volume.get('iops'),
+ encrypted=volume.get('encrypted', None))
+ else:
+ return BlockDeviceType(snapshot_id=volume.get('snapshot'),
+ ephemeral_name=volume.get('ephemeral'),
+ size=volume.get('volume_size'),
+ volume_type=volume_type,
+ delete_on_termination=volume.get('delete_on_termination', False),
+ iops=volume.get('iops'))
+
+
+def boto_supports_param_in_spot_request(ec2, param):
+ """
+ Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if boto library has the named param as an argument on the request_spot_instances method, else False
+ """
+ method = getattr(ec2, 'request_spot_instances')
+ return param in get_function_code(method).co_varnames
+
+
+def await_spot_requests(module, ec2, spot_requests, count):
+ """
+ Wait for a group of spot requests to be fulfilled, or fail.
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ spot_requests: list of boto.ec2.spotinstancerequest.SpotInstanceRequest objects returned by ec2.request_spot_instances
+ count: Total number of instances to be created by the spot requests
+
+ Returns:
+ list of instance IDs created by the spot request(s)
+ """
+ spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
+ wait_complete = time.time() + spot_wait_timeout
+
+ spot_req_inst_ids = dict()
+ while time.time() < wait_complete:
+ reqs = ec2.get_all_spot_instance_requests()
+ for sirb in spot_requests:
+ if sirb.id in spot_req_inst_ids:
+ continue
+ for sir in reqs:
+ if sir.id != sirb.id:
+ continue # this is not our spot instance
+ if sir.instance_id is not None:
+ spot_req_inst_ids[sirb.id] = sir.instance_id
+ elif sir.state == 'open':
+ continue # still waiting, nothing to do here
+ elif sir.state == 'active':
+ continue # Instance is created already, nothing to do here
+ elif sir.state == 'failed':
+ module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
+ sir.id, sir.status.code, sir.fault.code, sir.fault.message))
+ elif sir.state == 'cancelled':
+ module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
+ elif sir.state == 'closed':
+ # instance is terminating or marked for termination
+ # this may be intentional on the part of the operator,
+ # or it may have been terminated by AWS due to capacity,
+ # price, or group constraints. In this case, we'll fail
+ # the module if the reason for the state is anything
+ # other than termination by user. Codes are documented at
+ # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
+ if sir.status.code == 'instance-terminated-by-user':
+ # do nothing, since the user likely did this on purpose
+ pass
+ else:
+ spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
+ module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))
+
+ if len(spot_req_inst_ids) < count:
+ time.sleep(5)
+ else:
+ return list(spot_req_inst_ids.values())
+ module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime())
+
+
+def enforce_count(module, ec2, vpc):
+
+ exact_count = module.params.get('exact_count')
+ count_tag = module.params.get('count_tag')
+ zone = module.params.get('zone')
+
+ # fail here if the exact count was specified without filtering
+ # on a tag, as this may lead to an undesired removal of instances
+ if exact_count and count_tag is None:
+ module.fail_json(msg="you must use the 'count_tag' option with exact_count")
+
+ reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone)
+
+ changed = None
+ checkmode = False
+ instance_dict_array = []
+ changed_instance_ids = None
+
+ if len(instances) == exact_count:
+ changed = False
+ elif len(instances) < exact_count:
+ changed = True
+ to_create = exact_count - len(instances)
+ if not checkmode:
+ (instance_dict_array, changed_instance_ids, changed) \
+ = create_instances(module, ec2, vpc, override_count=to_create)
+
+ for inst in instance_dict_array:
+ instances.append(inst)
+ elif len(instances) > exact_count:
+ changed = True
+ to_remove = len(instances) - exact_count
+ if not checkmode:
+ all_instance_ids = sorted([x.id for x in instances])
+ remove_ids = all_instance_ids[0:to_remove]
+
+ instances = [x for x in instances if x.id not in remove_ids]
+
+ (changed, instance_dict_array, changed_instance_ids) \
+ = terminate_instances(module, ec2, remove_ids)
+ terminated_list = []
+ for inst in instance_dict_array:
+ inst['state'] = "terminated"
+ terminated_list.append(inst)
+ instance_dict_array = terminated_list
+
+ # ensure all instances are dictionaries
+ all_instances = []
+ for inst in instances:
+
+ if not isinstance(inst, dict):
+ warn_if_public_ip_assignment_changed(module, inst)
+ inst = get_instance_info(inst)
+ all_instances.append(inst)
+
+ return (all_instances, instance_dict_array, changed_instance_ids, changed)
+
+
+def create_instances(module, ec2, vpc, override_count=None):
+ """
+ Creates new instances
+
+ module : AnsibleModule object
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ A list of dictionaries with instance information
+ about the instances that were launched
+ """
+
+ key_name = module.params.get('key_name')
+ id = module.params.get('id')
+ group_name = module.params.get('group')
+ group_id = module.params.get('group_id')
+ zone = module.params.get('zone')
+ instance_type = module.params.get('instance_type')
+ tenancy = module.params.get('tenancy')
+ spot_price = module.params.get('spot_price')
+ spot_type = module.params.get('spot_type')
+ image = module.params.get('image')
+ if override_count:
+ count = override_count
+ else:
+ count = module.params.get('count')
+ monitoring = module.params.get('monitoring')
+ kernel = module.params.get('kernel')
+ ramdisk = module.params.get('ramdisk')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
+ placement_group = module.params.get('placement_group')
+ user_data = module.params.get('user_data')
+ instance_tags = module.params.get('instance_tags')
+ vpc_subnet_id = module.params.get('vpc_subnet_id')
+ assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+ private_ip = module.params.get('private_ip')
+ instance_profile_name = module.params.get('instance_profile_name')
+ volumes = module.params.get('volumes')
+ ebs_optimized = module.params.get('ebs_optimized')
+ exact_count = module.params.get('exact_count')
+ count_tag = module.params.get('count_tag')
+ source_dest_check = module.boolean(module.params.get('source_dest_check'))
+ termination_protection = module.boolean(module.params.get('termination_protection'))
+ network_interfaces = module.params.get('network_interfaces')
+ spot_launch_group = module.params.get('spot_launch_group')
+ instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')
+
+ vpc_id = None
+ if vpc_subnet_id:
+ if not vpc:
+ module.fail_json(msg="region must be specified")
+ else:
+ vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
+ else:
+ vpc_id = None
+
+ try:
+ # Here we try to look up the group id from the security group name - if group is set.
+ if group_name:
+ if vpc_id:
+ grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
+ else:
+ grp_details = ec2.get_all_security_groups()
+ if isinstance(group_name, string_types):
+ group_name = [group_name]
+ unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
+ if len(unmatched) > 0:
+ module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
+ group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
+ # Now we try to look up the group id, testing that the group exists.
+ elif group_id:
+ # wrap the group_id in a list if it's not one already
+ if isinstance(group_id, string_types):
+ group_id = [group_id]
+ grp_details = ec2.get_all_security_groups(group_ids=group_id)
+ group_name = [grp_item.name for grp_item in grp_details]
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ # Look up any instances that match our run id.
+
+ running_instances = []
+ count_remaining = int(count)
+
+ if id is not None:
+ filter_dict = {'client-token': id, 'instance-state-name': 'running'}
+ previous_reservations = ec2.get_all_instances(None, filter_dict)
+ for res in previous_reservations:
+ for prev_instance in res.instances:
+ running_instances.append(prev_instance)
+ count_remaining = count_remaining - len(running_instances)
+
+ # Both min_count and max_count equal the count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want.
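+ # (For example, with count=5 and two running instances already matching this
+ # run's client token, count_remaining is 3 and run_instances is asked for
+ # exactly min_count=max_count=3.)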
+ + if count_remaining == 0: + changed = False + else: + changed = True + try: + params = {'image_id': image, + 'key_name': key_name, + 'monitoring_enabled': monitoring, + 'placement': zone, + 'instance_type': instance_type, + 'kernel_id': kernel, + 'ramdisk_id': ramdisk} + if user_data is not None: + params['user_data'] = to_bytes(user_data, errors='surrogate_or_strict') + + if ebs_optimized: + params['ebs_optimized'] = ebs_optimized + + # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request + if not spot_price: + params['tenancy'] = tenancy + + if boto_supports_profile_name_arg(ec2): + params['instance_profile_name'] = instance_profile_name + else: + if instance_profile_name is not None: + module.fail_json( + msg="instance_profile_name parameter requires Boto version 2.5.0 or higher") + + if assign_public_ip is not None: + if not boto_supports_associate_public_ip_address(ec2): + module.fail_json( + msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.") + elif not vpc_subnet_id: + module.fail_json( + msg="assign_public_ip only available with vpc_subnet_id") + + else: + if private_ip: + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + subnet_id=vpc_subnet_id, + private_ip_address=private_ip, + groups=group_id, + associate_public_ip_address=assign_public_ip) + else: + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + subnet_id=vpc_subnet_id, + groups=group_id, + associate_public_ip_address=assign_public_ip) + interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) + params['network_interfaces'] = interfaces + else: + if network_interfaces: + if isinstance(network_interfaces, string_types): + network_interfaces = [network_interfaces] + interfaces = [] + for i, network_interface_id in enumerate(network_interfaces): + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + network_interface_id=network_interface_id, + device_index=i) + interfaces.append(interface) + params['network_interfaces'] = \ + boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces) + else: + params['subnet_id'] = vpc_subnet_id + if vpc_subnet_id: + params['security_group_ids'] = group_id + else: + params['security_groups'] = group_name + + if volumes: + bdm = BlockDeviceMapping() + for volume in volumes: + if 'device_name' not in volume: + module.fail_json(msg='Device name must be set for volume') + # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0 + # to be a signal not to create this volume + if 'volume_size' not in volume or int(volume['volume_size']) > 0: + bdm[volume['device_name']] = create_block_device(module, ec2, volume) + + params['block_device_map'] = bdm + + # check to see if we're using spot pricing first before starting instances + if not spot_price: + if assign_public_ip is not None and private_ip: + params.update( + dict( + min_count=count_remaining, + max_count=count_remaining, + client_token=id, + placement_group=placement_group, + ) + ) + else: + params.update( + dict( + min_count=count_remaining, + max_count=count_remaining, + client_token=id, + placement_group=placement_group, + private_ip_address=private_ip, + ) + ) + + # For ordinary (not spot) instances, we can select 'stop' + # (the default) or 'terminate' here. 
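+ # (Spot requests do not accept this parameter at all; the spot branch below
+ # rejects any requested value other than 'terminate' before submitting.)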
+ params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
+
+ try:
+ res = ec2.run_instances(**params)
+ except boto.exception.EC2ResponseError as e:
+ if (params['instance_initiated_shutdown_behavior'] != 'terminate' and
+ "InvalidParameterCombination" == e.error_code):
+ params['instance_initiated_shutdown_behavior'] = 'terminate'
+ res = ec2.run_instances(**params)
+ else:
+ raise
+
+ instids = [i.id for i in res.instances]
+ while True:
+ try:
+ ec2.get_all_instances(instids)
+ break
+ except boto.exception.EC2ResponseError as e:
+ if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
+ # there's a race between start and get an instance
+ continue
+ else:
+ module.fail_json(msg=str(e))
+
+ # The instances returned through ec2.run_instances above can be in
+ # terminated state due to idempotency. See commit 7f11c3d for a complete
+ # explanation.
+ terminated_instances = [
+ str(instance.id) for instance in res.instances if instance.state == 'terminated'
+ ]
+ if terminated_instances:
+ module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
+ "were created previously but have since been terminated - " +
+ "use a (possibly different) 'instanceid' parameter")
+
+ else:
+ if private_ip:
+ module.fail_json(
+ msg='private_ip only available with on-demand (non-spot) instances')
+ if boto_supports_param_in_spot_request(ec2, 'placement_group'):
+ params['placement_group'] = placement_group
+ elif placement_group:
+ module.fail_json(
+ msg="placement_group parameter requires Boto version 2.3.0 or higher.")
+
+ # You can't tell spot instances to 'stop'; they will always be
+ # 'terminate'd. For convenience, we'll ignore the latter value.
+ if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
+ module.fail_json(
+ msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
+
+ if spot_launch_group and isinstance(spot_launch_group, string_types):
+ params['launch_group'] = spot_launch_group
+
+ params.update(dict(
+ count=count_remaining,
+ type=spot_type,
+ ))
+
+ # Set spot ValidUntil
+ # ValidUntil -> (timestamp). The end date of the request, in
+ # UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
+ utc_valid_until = (
+ datetime.datetime.utcnow() +
+ datetime.timedelta(seconds=spot_wait_timeout))
+ params['valid_until'] = utc_valid_until.strftime('%Y-%m-%dT%H:%M:%S.000Z')
+
+ res = ec2.request_spot_instances(spot_price, **params)
+
+ # Now we have to do the intermediate waiting
+ if wait:
+ instids = await_spot_requests(module, ec2, res, count)
+ else:
+ instids = []
+ except boto.exception.BotoServerError as e:
+ module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message))
+
+ # wait here until the instances are up
+ num_running = 0
+ wait_timeout = time.time() + wait_timeout
+ res_list = ()
+ while wait_timeout > time.time() and num_running < len(instids):
+ try:
+ res_list = ec2.get_all_instances(instids)
+ except boto.exception.BotoServerError as e:
+ if e.error_code == 'InvalidInstanceID.NotFound':
+ time.sleep(1)
+ continue
+ else:
+ raise
+
+ num_running = 0
+ for res in res_list:
+ num_running += len([i for i in res.instances if i.state == 'running'])
+ if len(res_list) <= 0:
+ # got a bad response of some sort, possibly due to
+ # stale/cached data. 
Wait a second and then try again + time.sleep(1) + continue + if wait and num_running < len(instids): + time.sleep(5) + else: + break + + if wait and wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="wait for instances running timeout on %s" % time.asctime()) + + # We do this after the loop ends so that we end up with one list + for res in res_list: + running_instances.extend(res.instances) + + # Enabled by default by AWS + if source_dest_check is False: + for inst in res.instances: + inst.modify_attribute('sourceDestCheck', False) + + # Disabled by default by AWS + if termination_protection is True: + for inst in res.instances: + inst.modify_attribute('disableApiTermination', True) + + # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound + if instance_tags and instids: + try: + ec2.create_tags(instids, instance_tags) + except boto.exception.EC2ResponseError as e: + module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message)) + + instance_dict_array = [] + created_instance_ids = [] + for inst in running_instances: + inst.update() + d = get_instance_info(inst) + created_instance_ids.append(inst.id) + instance_dict_array.append(d) + + return (instance_dict_array, created_instance_ids, changed) + + +def terminate_instances(module, ec2, instance_ids): + """ + Terminates a list of instances + + module: Ansible module object + ec2: authenticated ec2 connection object + termination_list: a list of instances to terminate in the form of + [ {id: <inst-id>}, ..] + + Returns a dictionary of instance information + about the instances terminated. + + If the instance to be terminated is running + "changed" will be set to False. + + """ + + # Whether to wait for termination to complete before returning + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + + changed = False + instance_dict_array = [] + + if not isinstance(instance_ids, list) or len(instance_ids) < 1: + module.fail_json(msg='instance_ids should be a list of instances, aborting') + + terminated_instance_ids = [] + for res in ec2.get_all_instances(instance_ids): + for inst in res.instances: + if inst.state == 'running' or inst.state == 'stopped': + terminated_instance_ids.append(inst.id) + instance_dict_array.append(get_instance_info(inst)) + try: + ec2.terminate_instances([inst.id]) + except EC2ResponseError as e: + module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e)) + changed = True + + # wait here until the instances are 'terminated' + if wait: + num_terminated = 0 + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids): + response = ec2.get_all_instances(instance_ids=terminated_instance_ids, + filters={'instance-state-name': 'terminated'}) + try: + num_terminated = sum([len(res.instances) for res in response]) + except Exception as e: + # got a bad response of some sort, possibly due to + # stale/cached data. 
Wait a second and then try again + time.sleep(1) + continue + + if num_terminated < len(terminated_instance_ids): + time.sleep(5) + + # waiting took too long + if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids): + module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime()) + # Lets get the current state of the instances after terminating - issue600 + instance_dict_array = [] + for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}): + for inst in res.instances: + instance_dict_array.append(get_instance_info(inst)) + + return (changed, instance_dict_array, terminated_instance_ids) + + +def startstop_instances(module, ec2, instance_ids, state, instance_tags): + """ + Starts or stops a list of existing instances + + module: Ansible module object + ec2: authenticated ec2 connection object + instance_ids: The list of instances to start in the form of + [ {id: <inst-id>}, ..] + instance_tags: A dict of tag keys and values in the form of + {key: value, ... } + state: Intended state ("running" or "stopped") + + Returns a dictionary of instance information + about the instances started/stopped. + + If the instance was not able to change state, + "changed" will be set to False. + + Note that if instance_ids and instance_tags are both non-empty, + this method will process the intersection of the two + """ + + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + group_id = module.params.get('group_id') + group_name = module.params.get('group') + changed = False + instance_dict_array = [] + + if not isinstance(instance_ids, list) or len(instance_ids) < 1: + # Fail unless the user defined instance tags + if not instance_tags: + module.fail_json(msg='instance_ids should be a list of instances, aborting') + + # To make an EC2 tag filter, we need to prepend 'tag:' to each key. 
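+ # (For example, instance_tags={'Name': 'db'} becomes the filter {'tag:Name': 'db'}.)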
+ # An empty filter does no filtering, so it's safe to pass it to the + # get_all_instances method even if the user did not specify instance_tags + filters = {} + if instance_tags: + for key, value in instance_tags.items(): + filters["tag:" + key] = value + + if module.params.get('id'): + filters['client-token'] = module.params['id'] + # Check that our instances are not in the state we want to take + + # Check (and eventually change) instances attributes and instances state + existing_instances_array = [] + for res in ec2.get_all_instances(instance_ids, filters=filters): + for inst in res.instances: + + warn_if_public_ip_assignment_changed(module, inst) + + changed = (check_source_dest_attr(module, inst, ec2) or + check_termination_protection(module, inst) or changed) + + # Check security groups and if we're using ec2-vpc; ec2-classic security groups may not be modified + if inst.vpc_id and group_name: + grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id}) + if isinstance(group_name, string_types): + group_name = [group_name] + unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details) + if unmatched: + module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched)) + group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name] + elif inst.vpc_id and group_id: + if isinstance(group_id, string_types): + group_id = [group_id] + grp_details = ec2.get_all_security_groups(group_ids=group_id) + group_ids = [grp_item.id for grp_item in grp_details] + if inst.vpc_id and (group_name or group_id): + if set(sg.id for sg in inst.groups) != set(group_ids): + changed = inst.modify_attribute('groupSet', group_ids) + + # Check instance state + if inst.state != state: + instance_dict_array.append(get_instance_info(inst)) + try: + if state == 'running': + inst.start() + else: + inst.stop() + except EC2ResponseError as e: + module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) + changed = True + existing_instances_array.append(inst.id) + + instance_ids = list(set(existing_instances_array + (instance_ids or []))) + # Wait for all the instances to finish starting or stopping + wait_timeout = time.time() + wait_timeout + while wait and wait_timeout > time.time(): + instance_dict_array = [] + matched_instances = [] + for res in ec2.get_all_instances(instance_ids): + for i in res.instances: + if i.state == state: + instance_dict_array.append(get_instance_info(i)) + matched_instances.append(i) + if len(matched_instances) < len(instance_ids): + time.sleep(5) + else: + break + + if wait and wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="wait for instances running timeout on %s" % time.asctime()) + + return (changed, instance_dict_array, instance_ids) + + +def restart_instances(module, ec2, instance_ids, state, instance_tags): + """ + Restarts a list of existing instances + + module: Ansible module object + ec2: authenticated ec2 connection object + instance_ids: The list of instances to start in the form of + [ {id: <inst-id>}, ..] + instance_tags: A dict of tag keys and values in the form of + {key: value, ... } + state: Intended state ("restarted") + + Returns a dictionary of instance information + about the instances. + + If the instance was not able to change state, + "changed" will be set to False. + + Wait will not apply here as this is a OS level operation. 
+ + Note that if instance_ids and instance_tags are both non-empty, + this method will process the intersection of the two. + """ + + changed = False + instance_dict_array = [] + + if not isinstance(instance_ids, list) or len(instance_ids) < 1: + # Fail unless the user defined instance tags + if not instance_tags: + module.fail_json(msg='instance_ids should be a list of instances, aborting') + + # To make an EC2 tag filter, we need to prepend 'tag:' to each key. + # An empty filter does no filtering, so it's safe to pass it to the + # get_all_instances method even if the user did not specify instance_tags + filters = {} + if instance_tags: + for key, value in instance_tags.items(): + filters["tag:" + key] = value + if module.params.get('id'): + filters['client-token'] = module.params['id'] + + # Check that our instances are not in the state we want to take + + # Check (and eventually change) instances attributes and instances state + for res in ec2.get_all_instances(instance_ids, filters=filters): + for inst in res.instances: + + warn_if_public_ip_assignment_changed(module, inst) + + changed = (check_source_dest_attr(module, inst, ec2) or + check_termination_protection(module, inst) or changed) + + # Check instance state + if inst.state != state: + instance_dict_array.append(get_instance_info(inst)) + try: + inst.reboot() + except EC2ResponseError as e: + module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) + changed = True + + return (changed, instance_dict_array, instance_ids) + + +def check_termination_protection(module, inst): + """ + Check the instance disableApiTermination attribute. + + module: Ansible module object + inst: EC2 instance object + + returns: True if state changed None otherwise + """ + + termination_protection = module.params.get('termination_protection') + + if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None): + inst.modify_attribute('disableApiTermination', termination_protection) + return True + + +def check_source_dest_attr(module, inst, ec2): + """ + Check the instance sourceDestCheck attribute. + + module: Ansible module object + inst: EC2 instance object + + returns: True if state changed None otherwise + """ + + source_dest_check = module.params.get('source_dest_check') + + if source_dest_check is not None: + try: + if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check: + inst.modify_attribute('sourceDestCheck', source_dest_check) + return True + except boto.exception.EC2ResponseError as exc: + # instances with more than one Elastic Network Interface will + # fail, because they have the sourceDestCheck attribute defined + # per-interface + if exc.code == 'InvalidInstanceID': + for interface in inst.interfaces: + if interface.source_dest_check != source_dest_check: + ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check) + return True + else: + module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc), + exception=traceback.format_exc()) + + +def warn_if_public_ip_assignment_changed(module, instance): + # This is a non-modifiable attribute. 
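+ # AWS decides public IP assignment only at launch, so for an existing
+ # instance the best we can do is warn when the requested value differs.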
+ assign_public_ip = module.params.get('assign_public_ip') + + # Check that public ip assignment is the same and warn if not + public_dns_name = getattr(instance, 'public_dns_name', None) + if (assign_public_ip or public_dns_name) and (not public_dns_name or assign_public_ip is False): + module.warn("Unable to modify public ip assignment to {0} for instance {1}. " + "Whether or not to assign a public IP is determined during instance creation.".format(assign_public_ip, instance.id)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + key_name=dict(aliases=['keypair']), + id=dict(), + group=dict(type='list', aliases=['groups']), + group_id=dict(type='list'), + zone=dict(aliases=['aws_zone', 'ec2_zone']), + instance_type=dict(aliases=['type']), + spot_price=dict(), + spot_type=dict(default='one-time', choices=["one-time", "persistent"]), + spot_launch_group=dict(), + image=dict(), + kernel=dict(), + count=dict(type='int', default='1'), + monitoring=dict(type='bool', default=False), + ramdisk=dict(), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + spot_wait_timeout=dict(type='int', default=600), + placement_group=dict(), + user_data=dict(), + instance_tags=dict(type='dict'), + vpc_subnet_id=dict(), + assign_public_ip=dict(type='bool'), + private_ip=dict(), + instance_profile_name=dict(), + instance_ids=dict(type='list', aliases=['instance_id']), + source_dest_check=dict(type='bool', default=None), + termination_protection=dict(type='bool', default=None), + state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']), + instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']), + exact_count=dict(type='int', default=None), + count_tag=dict(type='raw'), + volumes=dict(type='list'), + ebs_optimized=dict(type='bool', default=False), + tenancy=dict(default='default', choices=['default', 'dedicated']), + network_interfaces=dict(type='list', aliases=['network_interface']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + # Can be uncommented when we finish the deprecation cycle. + # ['group', 'group_id'], + ['exact_count', 'count'], + ['exact_count', 'state'], + ['exact_count', 'instance_ids'], + ['network_interfaces', 'assign_public_ip'], + ['network_interfaces', 'group'], + ['network_interfaces', 'group_id'], + ['network_interfaces', 'private_ip'], + ['network_interfaces', 'vpc_subnet_id'], + ], + ) + + if module.params.get('group') and module.params.get('group_id'): + module.deprecate( + msg='Support for passing both group and group_id has been deprecated. 
'
+ 'Currently group_id is ignored, in future passing both will result in an error',
+ version='2.14', collection_name='ansible.builtin')
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+ if module.params.get('region') or not module.params.get('ec2_url'):
+ ec2 = ec2_connect(module)
+ elif module.params.get('ec2_url'):
+ ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
+
+ if 'region' not in aws_connect_kwargs:
+ aws_connect_kwargs['region'] = ec2.region
+
+ vpc = connect_vpc(**aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc())
+
+ tagged_instances = []
+
+ state = module.params['state']
+
+ if state == 'absent':
+ instance_ids = module.params['instance_ids']
+ if not instance_ids:
+ module.fail_json(msg='instance_ids list is required for absent state')
+
+ (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
+
+ elif state in ('running', 'stopped'):
+ instance_ids = module.params.get('instance_ids')
+ instance_tags = module.params.get('instance_tags')
+ if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+ module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
+
+ (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
+
+ elif state == 'restarted':
+ instance_ids = module.params.get('instance_ids')
+ instance_tags = module.params.get('instance_tags')
+ if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+ module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
+
+ (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
+
+ elif state == 'present':
+ # Changed is always set to true when provisioning new instances
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+
+ if module.params.get('exact_count') is None:
+ (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
+ else:
+ (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
+
+ # Always return instances in the same order
+ if new_instance_ids:
+ new_instance_ids.sort()
+ if instance_dict_array:
+ instance_dict_array.sort(key=lambda x: x['id'])
+ if tagged_instances:
+ tagged_instances.sort(key=lambda x: x['id'])
+
+ module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_ami_info.py b/test/support/integration/plugins/modules/ec2_ami_info.py
new file mode 100644
index 00000000..53c2374d
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_ami_info.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_info
+version_added: '2.5' +short_description: Gather information about ec2 AMIs +description: + - Gather information about ec2 AMIs + - This module was called C(ec2_ami_facts) before Ansible 2.9. The usage did not change. +author: + - Prasad Katti (@prasadkatti) +requirements: [ boto3 ] +options: + image_ids: + description: One or more image IDs. + aliases: [image_id] + type: list + elements: str + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters. + - Filter names and values are case sensitive. + type: dict + owners: + description: + - Filter the images by the owner. Valid options are an AWS account ID, self, + or an AWS owner alias ( amazon | aws-marketplace | microsoft ). + aliases: [owner] + type: list + elements: str + executable_users: + description: + - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs). + aliases: [executable_user] + type: list + elements: str + describe_image_attributes: + description: + - Describe attributes (like launchPermission) of the images found. + default: no + type: bool + +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: gather information about an AMI using ami-id + ec2_ami_info: + image_ids: ami-5b488823 + +- name: gather information about all AMIs with tag key Name and value webapp + ec2_ami_info: + filters: + "tag:Name": webapp + +- name: gather information about an AMI with 'AMI Name' equal to foobar + ec2_ami_info: + filters: + name: foobar + +- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477) + ec2_ami_info: + owners: 099720109477 + filters: + name: "ubuntu/images/ubuntu-zesty-17.04-*" +''' + +RETURN = ''' +images: + description: A list of images. + returned: always + type: list + elements: dict + contains: + architecture: + description: The architecture of the image. + returned: always + type: str + sample: x86_64 + block_device_mappings: + description: Any block device mapping entries. + returned: always + type: list + elements: dict + contains: + device_name: + description: The device name exposed to the instance. + returned: always + type: str + sample: /dev/sda1 + ebs: + description: EBS volumes + returned: always + type: complex + creation_date: + description: The date and time the image was created. + returned: always + type: str + sample: '2017-10-16T19:22:13.000Z' + description: + description: The description of the AMI. + returned: always + type: str + sample: '' + ena_support: + description: Whether enhanced networking with ENA is enabled. + returned: always + type: bool + sample: true + hypervisor: + description: The hypervisor type of the image. + returned: always + type: str + sample: xen + image_id: + description: The ID of the AMI. + returned: always + type: str + sample: ami-5b466623 + image_location: + description: The location of the AMI. + returned: always + type: str + sample: 408466080000/Webapp + image_type: + description: The type of image. + returned: always + type: str + sample: machine + launch_permissions: + description: A List of AWS accounts may launch the AMI. + returned: When image is owned by calling account and I(describe_image_attributes) is yes. 
+ type: list + elements: dict + contains: + group: + description: A value of 'all' means the AMI is public. + type: str + user_id: + description: An AWS account ID with permissions to launch the AMI. + type: str + sample: [{"group": "all"}, {"user_id": "408466080000"}] + name: + description: The name of the AMI that was provided during image creation. + returned: always + type: str + sample: Webapp + owner_id: + description: The AWS account ID of the image owner. + returned: always + type: str + sample: '408466080000' + public: + description: Whether the image has public launch permissions. + returned: always + type: bool + sample: true + root_device_name: + description: The device name of the root device. + returned: always + type: str + sample: /dev/sda1 + root_device_type: + description: The type of root device used by the AMI. + returned: always + type: str + sample: ebs + sriov_net_support: + description: Whether enhanced networking is enabled. + returned: always + type: str + sample: simple + state: + description: The current state of the AMI. + returned: always + type: str + sample: available + tags: + description: Any tags assigned to the image. + returned: always + type: dict + virtualization_type: + description: The type of virtualization of the AMI. + returned: always + type: str + sample: hvm +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict + + +def list_ec2_images(ec2_client, module): + + image_ids = module.params.get("image_ids") + owners = module.params.get("owners") + executable_users = module.params.get("executable_users") + filters = module.params.get("filters") + owner_param = [] + + # describe_images is *very* slow if you pass the `Owners` + # param (unless it's self), for some reason. + # Converting the owners to filters and removing from the + # owners param greatly speeds things up. 
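+ # (For example, owners=['099720109477'] is rewritten below to the filter
+ # {'owner-id': ['099720109477']} and dropped from the Owners parameter.)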
+ # Implementation based on aioue's suggestion in #24886 + for owner in owners: + if owner.isdigit(): + if 'owner-id' not in filters: + filters['owner-id'] = list() + filters['owner-id'].append(owner) + elif owner == 'self': + # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) + owner_param.append(owner) + else: + if 'owner-alias' not in filters: + filters['owner-alias'] = list() + filters['owner-alias'].append(owner) + + filters = ansible_dict_to_boto3_filter_list(filters) + + try: + images = ec2_client.describe_images(ImageIds=image_ids, Filters=filters, Owners=owner_param, ExecutableUsers=executable_users) + images = [camel_dict_to_snake_dict(image) for image in images["Images"]] + except (ClientError, BotoCoreError) as err: + module.fail_json_aws(err, msg="error describing images") + for image in images: + try: + image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', [])) + if module.params.get("describe_image_attributes"): + launch_permissions = ec2_client.describe_image_attribute(Attribute='launchPermission', ImageId=image['image_id'])['LaunchPermissions'] + image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions] + except (ClientError, BotoCoreError) as err: + # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures + pass + + images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist + module.exit_json(images=images) + + +def main(): + + argument_spec = dict( + image_ids=dict(default=[], type='list', aliases=['image_id']), + filters=dict(default={}, type='dict'), + owners=dict(default=[], type='list', aliases=['owner']), + executable_users=dict(default=[], type='list', aliases=['executable_user']), + describe_image_attributes=dict(default=False, type='bool') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._module._name == 'ec2_ami_facts': + module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'", + version='2.13', collection_name='ansible.builtin') + + ec2_client = module.client('ec2') + + list_ec2_images(ec2_client, module) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/ec2_group.py b/test/support/integration/plugins/modules/ec2_group.py new file mode 100644 index 00000000..bc416f66 --- /dev/null +++ b/test/support/integration/plugins/modules/ec2_group.py @@ -0,0 +1,1345 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: ec2_group +author: "Andrew de Quincey (@adq)" +version_added: "1.3" +requirements: [ boto3 ] +short_description: maintain an ec2 VPC security group. +description: + - Maintains ec2 security groups. This module has a dependency on python-boto >= 2.5. +options: + name: + description: + - Name of the security group. + - One of and only one of I(name) or I(group_id) is required. + - Required if I(state=present). + required: false + type: str + group_id: + description: + - Id of group to delete (works only with absent). 
+ - One of and only one of I(name) or I(group_id) is required.
+ required: false
+ version_added: "2.4"
+ type: str
+ description:
+ description:
+ - Description of the security group. Required when C(state) is C(present).
+ required: false
+ type: str
+ vpc_id:
+ description:
+ - ID of the VPC to create the group in.
+ required: false
+ type: str
+ rules:
+ description:
+ - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+ no inbound rules will be enabled. Rules list may include its own name in `group_name`.
+ This allows idempotent loopback additions (e.g. allow group to access itself).
+ Rule sources list support was added in version 2.4. This allows defining multiple sources per
+ source type, as well as multiple source types per rule. Prior to 2.4 only an individual source was allowed.
+ In version 2.5 support for rule descriptions was added.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ cidr_ipv6:
+ type: str
+ description:
+ - The IPv6 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ ip_prefix:
+ type: str
+ description:
+ - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
+ that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_id:
+ type: str
+ description:
+ - The ID of the Security Group that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_name:
+ type: str
+ description:
+ - Name of the Security Group that traffic is coming from.
+ - If the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_desc:
+ type: str
+ description:
+ - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ proto:
+ type: str
+ description:
+ - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
+ from_port:
+ type: int
+ description: The start of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
+ to_port:
+ type: int
+ description: The end of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
+ rule_desc:
+ type: str
+ description: A description for the rule.
+ rules_egress:
+ description:
+ - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
+ a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
+ Rule Egress sources list support was added in version 2.4. In version 2.5 support for rule descriptions
+ was added.
+ required: false
+ version_added: "1.6"
+ type: list
+ elements: dict
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ cidr_ipv6: + type: str + description: + - The IPv6 CIDR range traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + ip_prefix: + type: str + description: + - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) + that traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_id: + type: str + description: + - The ID of the Security Group that traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_name: + type: str + description: + - Name of the Security Group that traffic is going to. + - If the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_desc: + type: str + description: + - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + proto: + type: str + description: + - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) + from_port: + type: int + description: The start of the range of ports that traffic is going to. A value of C(-1) indicates all ports. + to_port: + type: int + description: The end of the range of ports that traffic is going to. A value of C(-1) indicates all ports. + rule_desc: + type: str + description: A description for the rule. + state: + version_added: "1.4" + description: + - Create or delete a security group. + required: false + default: 'present' + choices: [ "present", "absent" ] + aliases: [] + type: str + purge_rules: + version_added: "1.8" + description: + - Purge existing rules on security group that are not found in rules. + required: false + default: 'true' + aliases: [] + type: bool + purge_rules_egress: + version_added: "1.8" + description: + - Purge existing rules_egress on security group that are not found in rules_egress. + required: false + default: 'true' + aliases: [] + type: bool + tags: + version_added: "2.4" + description: + - A dictionary of one or more tags to assign to the security group. + required: false + type: dict + aliases: ['resource_tags'] + purge_tags: + version_added: "2.4" + description: + - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then + tags will not be modified. + required: false + default: yes + type: bool + +extends_documentation_fragment: + - aws + - ec2 + +notes: + - If a rule declares a group_name and that group doesn't exist, it will be + automatically created. In that case, group_desc should be provided as well. + The module will refuse to create a depended-on group without a description. + - Preview diff mode support is added in version 2.7. 
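+ - Using I(rule_desc) on a rule requires botocore >= 1.7.2; with an older botocore the
+ module fails before making any changes (see verify_rules_with_descriptions_permitted in the code).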
+''' + +EXAMPLES = ''' +- name: example using security group rule descriptions + ec2_group: + name: "{{ name }}" + description: sg with rule descriptions + vpc_id: vpc-xxxxxxxx + profile: "{{ aws_profile }}" + region: us-east-1 + rules: + - proto: tcp + ports: + - 80 + cidr_ip: 0.0.0.0/0 + rule_desc: allow all on port 80 + +- name: example ec2 group + ec2_group: + name: example + description: an example EC2 group + vpc_id: 12345 + region: eu-west-1 + aws_secret_key: SECRET + aws_access_key: ACCESS + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 10.0.0.0/8 + - proto: tcp + from_port: 443 + to_port: 443 + # this should only be needed for EC2 Classic security group rules + # because in a VPC an ELB will use a user-account security group + group_id: amazon-elb/sg-87654321/amazon-elb-sg + - proto: tcp + from_port: 3306 + to_port: 3306 + group_id: 123412341234/sg-87654321/exact-name-of-sg + - proto: udp + from_port: 10050 + to_port: 10050 + cidr_ip: 10.0.0.0/8 + - proto: udp + from_port: 10051 + to_port: 10051 + group_id: sg-12345678 + - proto: icmp + from_port: 8 # icmp type, -1 = any type + to_port: -1 # icmp subtype, -1 = any subtype + cidr_ip: 10.0.0.0/8 + - proto: all + # the containing group name may be specified here + group_name: example + - proto: all + # in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6), + # traffic on all ports is allowed, regardless of any ports you specify + from_port: 10050 # this value is ignored + to_port: 10050 # this value is ignored + cidr_ip: 10.0.0.0/8 + + rules_egress: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + cidr_ipv6: 64:ff9b::/96 + group_name: example-other + # description to use if example-other needs to be created + group_desc: other example EC2 group + +- name: example2 ec2 group + ec2_group: + name: example2 + description: an example2 EC2 group + vpc_id: 12345 + region: eu-west-1 + rules: + # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port). + - proto: tcp + ports: 22 + group_name: example-vpn + - proto: tcp + ports: + - 80 + - 443 + - 8080-8099 + cidr_ip: 0.0.0.0/0 + # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule. 
+ - proto: tcp + ports: + - 6379 + - 26379 + group_name: + - example-vpn + - example-redis + - proto: tcp + ports: 5665 + group_name: example-vpn + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + cidr_ipv6: + - 2607:F8B0::/32 + - 64:ff9b::/96 + group_id: + - sg-edcd9784 + diff: True + +- name: "Delete group by its id" + ec2_group: + region: eu-west-1 + group_id: sg-33b4ee5b + state: absent +''' + +RETURN = ''' +group_name: + description: Security group name + sample: My Security Group + type: str + returned: on create/update +group_id: + description: Security group id + sample: sg-abcd1234 + type: str + returned: on create/update +description: + description: Description of security group + sample: My Security Group + type: str + returned: on create/update +tags: + description: Tags associated with the security group + sample: + Name: My Security Group + Purpose: protecting stuff + type: dict + returned: on create/update +vpc_id: + description: ID of VPC to which the security group belongs + sample: vpc-abcd1234 + type: str + returned: on create/update +ip_permissions: + description: Inbound rules associated with the security group. + sample: + - from_port: 8182 + ip_protocol: tcp + ip_ranges: + - cidr_ip: "1.1.1.1/32" + ipv6_ranges: [] + prefix_list_ids: [] + to_port: 8182 + user_id_group_pairs: [] + type: list + returned: on create/update +ip_permissions_egress: + description: Outbound rules associated with the security group. + sample: + - ip_protocol: -1 + ip_ranges: + - cidr_ip: "0.0.0.0/0" + ipv6_ranges: [] + prefix_list_ids: [] + user_id_group_pairs: [] + type: list + returned: on create/update +owner_id: + description: AWS Account ID of the security group + sample: 123456789012 + type: int + returned: on create/update +''' + +import json +import re +import itertools +from copy import deepcopy +from time import sleep +from collections import namedtuple +from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible.module_utils.aws.iam import get_aws_account_id +from ansible.module_utils.aws.waiters import get_waiter +from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags +from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list +from ansible.module_utils.common.network import to_ipv6_subnet, to_subnet +from ansible.module_utils.compat.ipaddress import ip_network, IPv6Network +from ansible.module_utils._text import to_text +from ansible.module_utils.six import string_types + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # caught by AnsibleAWSModule + + +Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description']) +valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix']) +current_account_id = None + + +def rule_cmp(a, b): + """Compare rules without descriptions""" + for prop in ['port_range', 'protocol', 'target', 'target_type']: + if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol): + # equal protocols can interchange `(-1, -1)` and `(None, None)` + if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)): + continue + elif getattr(a, prop) != getattr(b, prop): + return False + elif getattr(a, prop) != getattr(b, prop): + return False + return True + + +def rules_to_permissions(rules): + return [to_permission(rule) for rule in rules] + + +def to_permission(rule): + # take a Rule, output the serialized grant + 
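# Illustrative example (not in the original source): Rule((80, 80), 'tcp', '10.0.0.0/8', 'ipv4', None)
+ # becomes {'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80, 'IpRanges': [{'CidrIp': '10.0.0.0/8'}]}
+ 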
perm = { + 'IpProtocol': rule.protocol, + } + perm['FromPort'], perm['ToPort'] = rule.port_range + if rule.target_type == 'ipv4': + perm['IpRanges'] = [{ + 'CidrIp': rule.target, + }] + if rule.description: + perm['IpRanges'][0]['Description'] = rule.description + elif rule.target_type == 'ipv6': + perm['Ipv6Ranges'] = [{ + 'CidrIpv6': rule.target, + }] + if rule.description: + perm['Ipv6Ranges'][0]['Description'] = rule.description + elif rule.target_type == 'group': + if isinstance(rule.target, tuple): + pair = {} + if rule.target[0]: + pair['UserId'] = rule.target[0] + # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific + if rule.target[1]: + pair['GroupId'] = rule.target[1] + elif rule.target[2]: + pair['GroupName'] = rule.target[2] + perm['UserIdGroupPairs'] = [pair] + else: + perm['UserIdGroupPairs'] = [{ + 'GroupId': rule.target + }] + if rule.description: + perm['UserIdGroupPairs'][0]['Description'] = rule.description + elif rule.target_type == 'ip_prefix': + perm['PrefixListIds'] = [{ + 'PrefixListId': rule.target, + }] + if rule.description: + perm['PrefixListIds'][0]['Description'] = rule.description + elif rule.target_type not in valid_targets: + raise ValueError('Invalid target type for rule {0}'.format(rule)) + return fix_port_and_protocol(perm) + + +def rule_from_group_permission(perm): + def ports_from_permission(p): + if 'FromPort' not in p and 'ToPort' not in p: + return (None, None) + return (int(perm['FromPort']), int(perm['ToPort'])) + + # outputs a rule tuple + for target_key, target_subkey, target_type in [ + ('IpRanges', 'CidrIp', 'ipv4'), + ('Ipv6Ranges', 'CidrIpv6', 'ipv6'), + ('PrefixListIds', 'PrefixListId', 'ip_prefix'), + ]: + if target_key not in perm: + continue + for r in perm[target_key]: + # there may be several IP ranges here, which is ok + yield Rule( + ports_from_permission(perm), + to_text(perm['IpProtocol']), + r[target_subkey], + target_type, + r.get('Description') + ) + if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']: + for pair in perm['UserIdGroupPairs']: + target = ( + pair.get('UserId', None), + pair.get('GroupId', None), + pair.get('GroupName', None), + ) + if pair.get('UserId', '').startswith('amazon-'): + # amazon-elb and amazon-prefix rules don't need + # group-id specified, so remove it when querying + # from permission + target = ( + target[0], + None, + target[2], + ) + elif 'VpcPeeringConnectionId' in pair or pair['UserId'] != current_account_id: + target = ( + pair.get('UserId', None), + pair.get('GroupId', None), + pair.get('GroupName', None), + ) + + yield Rule( + ports_from_permission(perm), + to_text(perm['IpProtocol']), + target, + 'group', + pair.get('Description') + ) + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['InvalidGroup.NotFound']) +def get_security_groups_with_backoff(connection, **kwargs): + return connection.describe_security_groups(**kwargs) + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def sg_exists_with_backoff(connection, **kwargs): + try: + return connection.describe_security_groups(**kwargs) + except is_boto3_error_code('InvalidGroup.NotFound'): + return {'SecurityGroups': []} + + +def deduplicate_rules_args(rules): + """Returns unique rules""" + if rules is None: + return None + return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values()) + + +def validate_rule(module, rule): + VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', 'ip_prefix', + 'group_id', 'group_name', 'group_desc', + 'proto', 
'from_port', 'to_port', 'rule_desc')
+ if not isinstance(rule, dict):
+ module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+ for k in rule:
+ if k not in VALID_PARAMS:
+ module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule))
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg='Specify group_id OR group_name, not both')
+
+
+ def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
+ """
+ Returns tuple of (target_type, target, group_created) after validating rule params.
+
+ rule: Dict describing a rule.
+ name: Name of the security group being managed.
+ groups: Dict of all available security groups.
+
+ AWS accepts an ip range or a security group as target of a rule. This
+ function validates the rule specification and returns either a non-None
+ group_id or a non-None ip range.
+ """
+ FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
+ group_id = None
+ group_name = None
+ target_group_created = False
+
+ validate_rule(module, rule)
+ if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+ # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
+ owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+ group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
+ groups[group_id] = group_instance
+ groups[group_name] = group_instance
+ # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
+ if group_id and group_name:
+ group_name = None
+ return 'group', (owner_id, group_id, group_name), False
+ elif 'group_id' in rule:
+ return 'group', rule['group_id'], False
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name == name:
+ group_id = group['GroupId']
+ groups[group_id] = group
+ groups[group_name] = group
+ elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+ # both are VPC groups, this is ok
+ group_id = groups[group_name]['GroupId']
+ elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+ # both are EC2 classic, this is ok
+ group_id = groups[group_name]['GroupId']
+ else:
+ auto_group = None
+ filters = {'group-name': group_name}
+ if vpc_id:
+ filters['vpc-id'] = vpc_id
+ # if we got here, either the target group does not exist, or there
+ # is a mix of EC2 classic + VPC groups. 
Mixing of EC2 classic + VPC + # is bad, so we have to create a new SG because no compatible group + # exists + if not rule.get('group_desc', '').strip(): + # retry describing the group once + try: + auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] + except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError): + module.fail_json(msg="group %s will be automatically created by rule %s but " + "no description was provided" % (group_name, rule)) + except ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + elif not module.check_mode: + params = dict(GroupName=group_name, Description=rule['group_desc']) + if vpc_id: + params['VpcId'] = vpc_id + try: + auto_group = client.create_security_group(**params) + get_waiter( + client, 'security_group_exists', + ).wait( + GroupIds=[auto_group['GroupId']], + ) + except is_boto3_error_code('InvalidGroup.Duplicate'): + # The group exists, but didn't show up in any of our describe-security-groups calls + # Try searching on a filter for the name, and allow a retry window for AWS to update + # the model on their end. + try: + auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] + except IndexError as e: + module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name)) + except ClientError as e: + module.fail_json_aws( + e, + msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name)) + if auto_group is not None: + group_id = auto_group['GroupId'] + groups[group_id] = auto_group + groups[group_name] = auto_group + target_group_created = True + return 'group', group_id, target_group_created + elif 'cidr_ip' in rule: + return 'ipv4', validate_ip(module, rule['cidr_ip']), False + elif 'cidr_ipv6' in rule: + return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False + elif 'ip_prefix' in rule: + return 'ip_prefix', rule['ip_prefix'], False + + module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule) + + +def ports_expand(ports): + # takes a list of ports and returns a list of (port_from, port_to) + ports_expanded = [] + for port in ports: + if not isinstance(port, string_types): + ports_expanded.append((port,) * 2) + elif '-' in port: + ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1))) + else: + ports_expanded.append((int(port.strip()),) * 2) + + return ports_expanded + + +def rule_expand_ports(rule): + # takes a rule dict and returns a list of expanded rule dicts + if 'ports' not in rule: + if isinstance(rule.get('from_port'), string_types): + rule['from_port'] = int(rule.get('from_port')) + if isinstance(rule.get('to_port'), string_types): + rule['to_port'] = int(rule.get('to_port')) + return [rule] + + ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']] + + rule_expanded = [] + for from_to in ports_expand(ports): + temp_rule = rule.copy() + del temp_rule['ports'] + temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to) + rule_expanded.append(temp_rule) + + return rule_expanded + + +def rules_expand_ports(rules): + # takes a list of rules and expands it based on 'ports' + if not rules: + return rules + + return [rule for rule_complex in rules + for rule in rule_expand_ports(rule_complex)] + + +def rule_expand_source(rule, source_type): + # takes a rule dict and returns 
a list of expanded rule dicts for the specified source_type
+ sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
+ source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
+
+ rule_expanded = []
+ for source in sources:
+ temp_rule = rule.copy()
+ for s in source_types_all:
+ temp_rule.pop(s, None)
+ temp_rule[source_type] = source
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+ def rule_expand_sources(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule)
+
+ return [r for stype in source_types
+ for r in rule_expand_source(rule, stype)]
+
+
+ def rules_expand_sources(rules):
+ # takes a list of rules and expands them based on the source types
+ # ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_sources(rule_complex)]
+
+
+ def update_rules_description(module, client, rule_type, group_id, ip_permissions):
+ if module.check_mode:
+ return
+ try:
+ if rule_type == "in":
+ client.update_security_group_rule_descriptions_ingress(GroupId=group_id, IpPermissions=ip_permissions)
+ if rule_type == "out":
+ client.update_security_group_rule_descriptions_egress(GroupId=group_id, IpPermissions=ip_permissions)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id)
+
+
+ def fix_port_and_protocol(permission):
+ for key in ('FromPort', 'ToPort'):
+ if key in permission:
+ if permission[key] is None:
+ del permission[key]
+ else:
+ permission[key] = int(permission[key])
+
+ permission['IpProtocol'] = to_text(permission['IpProtocol'])
+
+ return permission
+
+
+ def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id):
+ if revoke_ingress:
+ revoke(client, module, revoke_ingress, group_id, 'in')
+ if revoke_egress:
+ revoke(client, module, revoke_egress, group_id, 'out')
+ return bool(revoke_ingress or revoke_egress)
+
+
+ def revoke(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.revoke_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.revoke_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions))
+
+
+ def add_new_permissions(client, module, new_ingress, new_egress, group_id):
+ if new_ingress:
+ authorize(client, module, new_ingress, group_id, 'in')
+ if new_egress:
+ authorize(client, module, new_egress, group_id, 'out')
+ return bool(new_ingress or new_egress)
+
+
+ def authorize(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.authorize_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.authorize_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions))
+
+
+ def validate_ip(module, cidr_ip):
+ split_addr = cidr_ip.split('/')
+ if len(split_addr) == 2:
+ # this_ip is an IPv4 or IPv6 CIDR that may or may not have host bits set
+ # Get the network bits if IPv4, and validate if IPv6.
+ try:
+ ip = to_subnet(split_addr[0], split_addr[1])
+ if ip != cidr_ip:
+ module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(
+ cidr_ip, ip))
+ except ValueError:
+ # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here
+ try:
+ isinstance(ip_network(to_text(cidr_ip)), IPv6Network)
+ ip = cidr_ip
+ except ValueError:
+ # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError
+ # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits
+ ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1]
+ if ip6 != cidr_ip:
+ module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6))
+ return ip6
+ return ip
+ return cidr_ip
+
+
+ def update_tags(client, module, group_id, current_tags, tags, purge_tags):
+ tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+
+ if not module.check_mode:
+ if tags_to_delete:
+ try:
+ client.delete_tags(Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ client.create_tags(Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify))
+
+ return bool(tags_need_modify or tags_to_delete)
+
+
+ def update_rule_descriptions(module, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
+ changed = False
+ client = module.client('ec2')
+ ingress_needs_desc_update = []
+ egress_needs_desc_update = []
+
+ for present_rule in present_egress:
+ needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_egress_list.remove(r)
+ egress_needs_desc_update.extend(needs_update)
+ for present_rule in present_ingress:
+ needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_ingress_list.remove(r)
+ ingress_needs_desc_update.extend(needs_update)
+
+ if ingress_needs_desc_update:
+ update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update))
+ changed |= True
+ if egress_needs_desc_update:
+ update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update))
+ changed |= True
+ return changed
+
+
+ def create_security_group(client, module, name, description, vpc_id):
+ if not module.check_mode:
+ params = dict(GroupName=name, Description=description)
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ try:
+ group = client.create_security_group(**params)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to create security group")
+ # When a group is created, an egress_rule ALLOW ALL
+ # to 0.0.0.0/0 is added automatically but it's not
+ # reflected in the object 
returned by the AWS API + # call. We re-read the group for getting an updated object + # amazon sometimes takes a couple seconds to update the security group so wait till it exists + while True: + sleep(3) + group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] + if group.get('VpcId') and not group.get('IpPermissionsEgress'): + pass + else: + break + return group + return None + + +def wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_ingress, purge_egress): + group_id = group['GroupId'] + tries = 6 + + def await_rules(group, desired_rules, purge, rule_key): + for i in range(tries): + current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], [])) + if purge and len(current_rules ^ set(desired_rules)) == 0: + return group + elif purge: + conflicts = current_rules ^ set(desired_rules) + # For cases where set comparison is equivalent, but invalid port/proto exist + for a, b in itertools.combinations(conflicts, 2): + if rule_cmp(a, b): + conflicts.discard(a) + conflicts.discard(b) + if not len(conflicts): + return group + elif current_rules.issuperset(desired_rules) and not purge: + return group + sleep(10) + group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0] + module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules)) + return group + + group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0] + if 'VpcId' in group and module.params.get('rules_egress') is not None: + group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress') + return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions') + + +def group_exists(client, module, vpc_id, group_id, name): + params = {'Filters': []} + if group_id: + params['GroupIds'] = [group_id] + if name: + # Add name to filters rather than params['GroupNames'] + # because params['GroupNames'] only checks the default vpc if no vpc is provided + params['Filters'].append({'Name': 'group-name', 'Values': [name]}) + if vpc_id: + params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]}) + # Don't filter by description to maintain backwards compatibility + + try: + security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', []) + all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', []) + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Error in describe_security_groups") + + if security_groups: + groups = dict((group['GroupId'], group) for group in all_groups) + groups.update(dict((group['GroupName'], group) for group in all_groups)) + if vpc_id: + vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id) + groups.update(vpc_wins) + # maintain backwards compatibility by using the last matching group + return security_groups[-1], groups + return None, {} + + +def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress): + if not hasattr(client, "update_security_group_rule_descriptions_egress"): + all_rules = rules if rules else [] + rules_egress if rules_egress else [] + if any('rule_desc' in rule for rule in all_rules): + module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.") + + +def get_diff_final_resource(client, module, security_group): 
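+ # Predicts the 'after' side of diff output: merges the requested tags and
+ # rules into the current group state according to the purge flags, using
+ # only describe calls and no mutating AWS APIs.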
+ def get_account_id(security_group, module): + try: + owner_id = security_group.get('owner_id', module.client('sts').get_caller_identity()['Account']) + except (BotoCoreError, ClientError) as e: + owner_id = "Unable to determine owner_id: {0}".format(to_text(e)) + return owner_id + + def get_final_tags(security_group_tags, specified_tags, purge_tags): + if specified_tags is None: + return security_group_tags + tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags) + end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete) + end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete)) + end_result_tags.update(tags_need_modify) + return end_result_tags + + def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules): + if specified_rules is None: + return security_group_rules + if purge_rules: + final_rules = [] + else: + final_rules = list(security_group_rules) + specified_rules = flatten_nested_targets(module, deepcopy(specified_rules)) + for rule in specified_rules: + format_rule = { + 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'), + 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': [] + } + if rule.get('proto', 'tcp') in ('all', '-1', -1): + format_rule['ip_protocol'] = '-1' + format_rule.pop('from_port') + format_rule.pop('to_port') + elif rule.get('ports'): + if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)): + rule['ports'] = [rule['ports']] + for port in rule.get('ports'): + if isinstance(port, string_types) and '-' in port: + format_rule['from_port'], format_rule['to_port'] = port.split('-') + else: + format_rule['from_port'] = format_rule['to_port'] = port + elif rule.get('from_port') or rule.get('to_port'): + format_rule['from_port'] = rule.get('from_port', rule.get('to_port')) + format_rule['to_port'] = rule.get('to_port', rule.get('from_port')) + for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'): + if rule.get(source_type): + rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type) + if rule.get('rule_desc'): + format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}] + else: + if not isinstance(rule[source_type], list): + rule[source_type] = [rule[source_type]] + format_rule[rule_key] = [{source_type: target} for target in rule[source_type]] + if rule.get('group_id') or rule.get('group_name'): + rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0]) + format_rule['user_id_group_pairs'] = [{ + 'description': rule_sg.get('description', rule_sg.get('group_desc')), + 'group_id': rule_sg.get('group_id', rule.get('group_id')), + 'group_name': rule_sg.get('group_name', rule.get('group_name')), + 'peering_status': rule_sg.get('peering_status'), + 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)), + 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']), + 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id') + }] + for k, v in list(format_rule['user_id_group_pairs'][0].items()): + if v is None: + format_rule['user_id_group_pairs'][0].pop(k) + final_rules.append(format_rule) + # Order final rules consistently + final_rules.sort(key=get_ip_permissions_sort_key) + return final_rules + 
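# The dict assembled below mirrors the module's RETURN structure; the
+ # 'sg-xxxxxxxx' value is a check-mode placeholder for a group that does
+ # not exist yet.
+ 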
security_group_ingress = security_group.get('ip_permissions', []) + specified_ingress = module.params['rules'] + purge_ingress = module.params['purge_rules'] + security_group_egress = security_group.get('ip_permissions_egress', []) + specified_egress = module.params['rules_egress'] + purge_egress = module.params['purge_rules_egress'] + return { + 'description': module.params['description'], + 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'), + 'group_name': security_group.get('group_name', module.params['name']), + 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress), + 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress), + 'owner_id': get_account_id(security_group, module), + 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']), + 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])} + + +def flatten_nested_targets(module, rules): + def _flatten(targets): + for target in targets: + if isinstance(target, list): + for t in _flatten(target): + yield t + elif isinstance(target, string_types): + yield target + + if rules is not None: + for rule in rules: + target_list_type = None + if isinstance(rule.get('cidr_ip'), list): + target_list_type = 'cidr_ip' + elif isinstance(rule.get('cidr_ipv6'), list): + target_list_type = 'cidr_ipv6' + if target_list_type is not None: + rule[target_list_type] = list(_flatten(rule[target_list_type])) + return rules + + +def get_rule_sort_key(dicts): + if dicts.get('cidr_ip'): + return dicts.get('cidr_ip') + elif dicts.get('cidr_ipv6'): + return dicts.get('cidr_ipv6') + elif dicts.get('prefix_list_id'): + return dicts.get('prefix_list_id') + elif dicts.get('group_id'): + return dicts.get('group_id') + return None + + +def get_ip_permissions_sort_key(rule): + if rule.get('ip_ranges'): + rule.get('ip_ranges').sort(key=get_rule_sort_key) + return rule.get('ip_ranges')[0]['cidr_ip'] + elif rule.get('ipv6_ranges'): + rule.get('ipv6_ranges').sort(key=get_rule_sort_key) + return rule.get('ipv6_ranges')[0]['cidr_ipv6'] + elif rule.get('prefix_list_ids'): + rule.get('prefix_list_ids').sort(key=get_rule_sort_key) + return rule.get('prefix_list_ids')[0]['prefix_list_id'] + elif rule.get('user_id_group_pairs'): + rule.get('user_id_group_pairs').sort(key=get_rule_sort_key) + return rule.get('user_id_group_pairs')[0]['group_id'] + return None + + +def main(): + argument_spec = dict( + name=dict(), + group_id=dict(), + description=dict(), + vpc_id=dict(), + rules=dict(type='list'), + rules_egress=dict(type='list'), + state=dict(default='present', type='str', choices=['present', 'absent']), + purge_rules=dict(default=True, required=False, type='bool'), + purge_rules_egress=dict(default=True, required=False, type='bool'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, required=False, type='bool') + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[['name', 'group_id']], + required_if=[['state', 'present', ['name']]], + ) + + name = module.params['name'] + group_id = module.params['group_id'] + description = module.params['description'] + vpc_id = module.params['vpc_id'] + rules = flatten_nested_targets(module, deepcopy(module.params['rules'])) + rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress'])) + rules = 
deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules))) + rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress))) + state = module.params.get('state') + purge_rules = module.params['purge_rules'] + purge_rules_egress = module.params['purge_rules_egress'] + tags = module.params['tags'] + purge_tags = module.params['purge_tags'] + + if state == 'present' and not description: + module.fail_json(msg='Must provide description when state is present.') + + changed = False + client = module.client('ec2') + + verify_rules_with_descriptions_permitted(client, module, rules, rules_egress) + group, groups = group_exists(client, module, vpc_id, group_id, name) + group_created_new = not bool(group) + + global current_account_id + current_account_id = get_aws_account_id(module) + + before = {} + after = {} + + # Ensure requested group is absent + if state == 'absent': + if group: + # found a match, delete it + before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) + before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) + try: + if not module.check_mode: + client.delete_security_group(GroupId=group['GroupId']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group) + else: + group = None + changed = True + else: + # no match found, no changes required + pass + + # Ensure requested group is present + elif state == 'present': + if group: + # existing group + before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) + before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) + if group['Description'] != description: + module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting " + "and re-creating the security group. 
Try using state=absent to delete, then rerunning this task.")
+ else:
+ # no match found, create it
+ group = create_security_group(client, module, name, description, vpc_id)
+ changed = True
+
+ if tags is not None and group is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+ changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)
+
+ if group:
+ named_tuple_ingress_list = []
+ named_tuple_egress_list = []
+ current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
+ current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])
+
+ for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
+ (rules_egress, 'out', named_tuple_egress_list)]:
+ if new_rules is None:
+ continue
+ for rule in new_rules:
+ target_type, target, target_group_created = get_target_from_rule(
+ module, client, rule, name, group, groups, vpc_id)
+ changed |= target_group_created
+
+ if rule.get('proto', 'tcp') in ('all', '-1', -1):
+ rule['proto'] = '-1'
+ rule['from_port'] = None
+ rule['to_port'] = None
+ try:
+ int(rule.get('proto', 'tcp'))
+ rule['proto'] = to_text(rule.get('proto', 'tcp'))
+ rule['from_port'] = None
+ rule['to_port'] = None
+ except ValueError:
+ # rule does not use numeric protocol spec
+ pass
+
+ named_tuple_rule_list.append(
+ Rule(
+ port_range=(rule['from_port'], rule['to_port']),
+ protocol=to_text(rule.get('proto', 'tcp')),
+ target=target, target_type=target_type,
+ description=rule.get('rule_desc'),
+ )
+ )
+
+ # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
+ new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))]
+ new_egress_permissions = [to_permission(r) for r in (set(named_tuple_egress_list) - set(current_egress))]
+
+ if module.params.get('rules_egress') is None and 'VpcId' in group:
+ # when no egress rules are specified and we're in a VPC,
+ # we add in a default allow all out rule, which was the
+ # default behavior before egress rules were added
+ rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ if rule in current_egress:
+ named_tuple_egress_list.append(rule)
+ if rule not in current_egress:
+ current_egress.append(rule)
+
+ # Full sets of rules that will be present on the group after this run
+ present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
+ present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))
+
+ if purge_rules:
+ revoke_ingress = []
+ for p in present_ingress:
+ if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]):
+ revoke_ingress.append(to_permission(p))
+ else:
+ revoke_ingress = []
+ if purge_rules_egress and module.params.get('rules_egress') is not None:
+ if module.params.get('rules_egress') == []:
+ revoke_egress = [
+ to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
+ if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ ]
+ else:
+ revoke_egress = []
+ for p in present_egress:
+ if not any([rule_cmp(p, b) for b in named_tuple_egress_list]):
+ revoke_egress.append(to_permission(p))
+ else:
+ revoke_egress = []
+
+ # named_tuple_ingress_list and named_tuple_egress_list will be mutated by
+ # update_rule_descriptions below, so deep copy them first to record the
+ # 'desired' ingress and egress sg permissions
+ desired_ingress = deepcopy(named_tuple_ingress_list)
+ desired_egress = deepcopy(named_tuple_egress_list)
+
+ changed |= update_rule_descriptions(module, group['GroupId'], present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list)
+
+ # Revoke old rules
+ changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])
+
+ new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
+ new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
+ # Authorize new rules
+ changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])
+
+ if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
+ # A new group with no rules provided is already being awaited.
+ # When it is created we wait for the default egress rule to be added by AWS
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ elif changed and not module.check_mode:
+ # keep polling until current security group rules match the desired ingress and egress rules
+ security_group = wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
+ else:
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))
+
+ else:
+ security_group = {'group_id': None}
+
+ if module._diff:
+ if module.params['state'] == 'present':
+ after = get_diff_final_resource(client, module, security_group)
+ if before.get('ip_permissions'):
+ before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
+
+ security_group['diff'] = [{'before': before, 'after': after}]
+
+ module.exit_json(changed=changed, **security_group)
+
+
+ if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_net.py b/test/support/integration/plugins/modules/ec2_vpc_net.py
new file mode 100644
index 00000000..30e4b1e9
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_net.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net
+short_description: Configure AWS virtual private clouds
+description:
+ - Create, modify, and terminate AWS virtual private clouds.
+version_added: "2.0"
+author:
+ - Jonathan Davila (@defionscode)
+ - Sloane Hertel (@s-hertel)
+options:
+ name:
+ description:
+ - The name to give your VPC. This is used in combination with C(cidr_block) to determine if a VPC already exists.
+ required: yes
+ type: str
+ cidr_block:
+ description:
+ - The primary CIDR of the VPC. Since 2.5 a list of CIDRs can be provided. The first CIDR in the list is used
+ as the primary CIDR and, together with C(name), ensures idempotence. 
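+ - For example, C(cidr_block: ['10.10.0.0/16', '10.20.0.0/16']) makes 10.10.0.0/16 the primary CIDR.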
+ required: yes + type: list + elements: str + ipv6_cidr: + description: + - Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses, + or the size of the CIDR block. + default: False + type: bool + version_added: '2.10' + purge_cidrs: + description: + - Remove CIDRs that are associated with the VPC and are not specified in C(cidr_block). + default: no + type: bool + version_added: '2.5' + tenancy: + description: + - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created. + default: default + choices: [ 'default', 'dedicated' ] + type: str + dns_support: + description: + - Whether to enable AWS DNS support. + default: yes + type: bool + dns_hostnames: + description: + - Whether to enable AWS hostname support. + default: yes + type: bool + dhcp_opts_id: + description: + - The id of the DHCP options to use for this VPC. + type: str + tags: + description: + - The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of + the VPC if it's different. + aliases: [ 'resource_tags' ] + type: dict + state: + description: + - The state of the VPC. Either absent or present. + default: present + choices: [ 'present', 'absent' ] + type: str + multi_ok: + description: + - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want + duplicate VPCs created. + type: bool + default: false +requirements: + - boto3 + - botocore +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: create a VPC with dedicated tenancy and a couple of tags + ec2_vpc_net: + name: Module_dev2 + cidr_block: 10.10.0.0/16 + region: us-east-1 + tags: + module: ec2_vpc_net + this: works + tenancy: dedicated + +- name: create a VPC with dedicated tenancy and request an IPv6 CIDR + ec2_vpc_net: + name: Module_dev2 + cidr_block: 10.10.0.0/16 + ipv6_cidr: True + region: us-east-1 + tenancy: dedicated +''' + +RETURN = ''' +vpc: + description: info about the VPC that was created or deleted + returned: always + type: complex + contains: + cidr_block: + description: The CIDR of the VPC + returned: always + type: str + sample: 10.0.0.0/16 + cidr_block_association_set: + description: IPv4 CIDR blocks associated with the VPC + returned: success + type: list + sample: + "cidr_block_association_set": [ + { + "association_id": "vpc-cidr-assoc-97aeeefd", + "cidr_block": "20.0.0.0/24", + "cidr_block_state": { + "state": "associated" + } + } + ] + classic_link_enabled: + description: indicates whether ClassicLink is enabled + returned: always + type: bool + sample: false + dhcp_options_id: + description: the id of the DHCP options associated with this VPC + returned: always + type: str + sample: dopt-0fb8bd6b + id: + description: VPC resource id + returned: always + type: str + sample: vpc-c2e00da5 + instance_tenancy: + description: indicates whether VPC uses default or dedicated tenancy + returned: always + type: str + sample: default + ipv6_cidr_block_association_set: + description: IPv6 CIDR blocks associated with the VPC + returned: success + type: list + sample: + "ipv6_cidr_block_association_set": [ + { + "association_id": "vpc-cidr-assoc-97aeeefd", + "ipv6_cidr_block": "2001:db8::/56", + "ipv6_cidr_block_state": { + "state": "associated" + } + } + ] + is_default: + 
description: indicates whether this is the default VPC + returned: always + type: bool + sample: false + state: + description: state of the VPC + returned: always + type: str + sample: available + tags: + description: tags attached to the VPC, includes name + returned: always + type: complex + contains: + Name: + description: name tag for the VPC + returned: always + type: str + sample: pk_vpc4 +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from time import sleep, time +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict, compare_aws_tags, + ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict) +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native +from ansible.module_utils.network.common.utils import to_subnet + + +def vpc_exists(module, vpc, name, cidr_block, multi): + """Returns None or a vpc object depending on the existence of a VPC. When supplied + with a CIDR, it will check for matching tags to determine if it is a match + otherwise it will assume the VPC does not exist and thus return None. + """ + try: + matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': cidr_block}])['Vpcs'] + # If an exact matching using a list of CIDRs isn't found, check for a match with the first CIDR as is documented for C(cidr_block) + if not matching_vpcs: + matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': [cidr_block[0]]}])['Vpcs'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe VPCs") + + if multi: + return None + elif len(matching_vpcs) == 1: + return matching_vpcs[0]['VpcId'] + elif len(matching_vpcs) > 1: + module.fail_json(msg='Currently there are %d VPCs that have the same name and ' + 'CIDR block you specified. If you would like to create ' + 'the VPC anyway please pass True to the multi_ok param.' 
% len(matching_vpcs)) + return None + + +@AWSRetry.backoff(delay=3, tries=8, catch_extra_error_codes=['InvalidVpcID.NotFound']) +def get_classic_link_with_backoff(connection, vpc_id): + try: + return connection.describe_vpc_classic_link(VpcIds=[vpc_id])['Vpcs'][0].get('ClassicLinkEnabled') + except botocore.exceptions.ClientError as e: + if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.": + return False + else: + raise + + +def get_vpc(module, connection, vpc_id): + # wait for vpc to be available + try: + connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id)) + + try: + vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe VPCs") + try: + vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe VPCs") + + return vpc_obj + + +def update_vpc_tags(connection, module, vpc_id, tags, name): + if tags is None: + tags = dict() + + tags.update({'Name': name}) + tags = dict((k, to_native(v)) for k, v in tags.items()) + try: + current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}])['Tags']) + tags_to_update, dummy = compare_aws_tags(current_tags, tags, False) + if tags_to_update: + if not module.check_mode: + tags = ansible_dict_to_boto3_tag_list(tags_to_update) + vpc_obj = connection.create_tags(Resources=[vpc_id], Tags=tags, aws_retry=True) + + # Wait for tags to be updated + expected_tags = boto3_tag_list_to_ansible_dict(tags) + filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()] + connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters) + + return True + else: + return False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update tags") + + +def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): + if vpc_obj['DhcpOptionsId'] != dhcp_id: + if not module.check_mode: + try: + connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id)) + + try: + # Wait for DhcpOptionsId to be updated + filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}] + connection.get_waiter('vpc_available').wait(VpcIds=[vpc_obj['VpcId']], Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json(msg="Failed to wait for DhcpOptionsId to be updated") + + return True + else: + return False + + +def create_vpc(connection, module, cidr_block, tenancy): + try: + if not module.check_mode: + vpc_obj = connection.create_vpc(CidrBlock=cidr_block, InstanceTenancy=tenancy) + else: + module.exit_json(changed=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Failed to create the VPC") + + # wait for vpc to exist + try: + 
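# vpc_exists is a botocore EC2 waiter that polls DescribeVpcs until the new
+ # VpcId is visible; create_vpc can return before the VPC shows up in describes.
+ 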
connection.get_waiter('vpc_exists').wait(VpcIds=[vpc_obj['Vpc']['VpcId']]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be created.".format(vpc_obj['Vpc']['VpcId'])) + + return vpc_obj['Vpc']['VpcId'] + + +def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value): + start_time = time() + updated = False + while time() < start_time + 300: + current_value = connection.describe_vpc_attribute( + Attribute=attribute, + VpcId=vpc_id + )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value'] + if current_value != expected_value: + sleep(3) + else: + updated = True + break + if not updated: + module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute)) + + +def get_cidr_network_bits(module, cidr_block): + fixed_cidrs = [] + for cidr in cidr_block: + split_addr = cidr.split('/') + if len(split_addr) == 2: + # this_ip is a IPv4 CIDR that may or may not have host bits set + # Get the network bits. + valid_cidr = to_subnet(split_addr[0], split_addr[1]) + if cidr != valid_cidr: + module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, " + "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr)) + fixed_cidrs.append(valid_cidr) + else: + # let AWS handle invalid CIDRs + fixed_cidrs.append(cidr) + return fixed_cidrs + + +def main(): + argument_spec = dict( + name=dict(required=True), + cidr_block=dict(type='list', required=True), + ipv6_cidr=dict(type='bool', default=False), + tenancy=dict(choices=['default', 'dedicated'], default='default'), + dns_support=dict(type='bool', default=True), + dns_hostnames=dict(type='bool', default=True), + dhcp_opts_id=dict(), + tags=dict(type='dict', aliases=['resource_tags']), + state=dict(choices=['present', 'absent'], default='present'), + multi_ok=dict(type='bool', default=False), + purge_cidrs=dict(type='bool', default=False), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + name = module.params.get('name') + cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block')) + ipv6_cidr = module.params.get('ipv6_cidr') + purge_cidrs = module.params.get('purge_cidrs') + tenancy = module.params.get('tenancy') + dns_support = module.params.get('dns_support') + dns_hostnames = module.params.get('dns_hostnames') + dhcp_id = module.params.get('dhcp_opts_id') + tags = module.params.get('tags') + state = module.params.get('state') + multi = module.params.get('multi_ok') + + changed = False + + connection = module.client( + 'ec2', + retry_decorator=AWSRetry.jittered_backoff( + retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound'] + ) + ) + + if dns_hostnames and not dns_support: + module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support') + + if state == 'present': + + # Check if VPC exists + vpc_id = vpc_exists(module, connection, name, cidr_block, multi) + + if vpc_id is None: + vpc_id = create_vpc(connection, module, cidr_block[0], tenancy) + changed = True + + vpc_obj = get_vpc(module, connection, vpc_id) + + associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', []) + if cidr['CidrBlockState']['State'] != 'disassociated') + to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs] + to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in 
cidr_block]
+        expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add
+
+        if len(cidr_block) > 1:
+            for cidr in to_add:
+                changed = True
+                try:
+                    connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id)
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(cidr))
+        if ipv6_cidr:
+            if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
+                module.warn("Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}".format(
+                    vpc_id,
+                    vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
+            else:
+                try:
+                    connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id)
+                    changed = True
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, "Unable to associate an Amazon-provided IPv6 CIDR block with VPC {0}.".format(vpc_id))
+
+        if purge_cidrs:
+            for association_id in to_remove:
+                changed = True
+                try:
+                    connection.disassociate_vpc_cidr_block(AssociationId=association_id)
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
+                                            "are associated with the CIDR block before you can disassociate it.".format(association_id))
+
+        if dhcp_id is not None:
+            try:
+                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
+                    changed = True
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, "Failed to update DHCP options")
+
+        if tags is not None or name is not None:
+            try:
+                if update_vpc_tags(connection, module, vpc_id, tags, name):
+                    changed = True
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to update tags")
+
+        current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
+        current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
+        if current_dns_enabled != dns_support:
+            changed = True
+            if not module.check_mode:
+                try:
+                    connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, "Failed to update the enableDnsSupport attribute")
+        if current_dns_hostnames != dns_hostnames:
+            changed = True
+            if not module.check_mode:
+                try:
+                    connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames})
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, "Failed to update the enableDnsHostnames attribute")
+
+        # wait for associated cidrs to match
+        if to_add or to_remove:
+            try:
+                connection.get_waiter('vpc_available').wait(
+                    VpcIds=[vpc_id],
+                    Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
+                )
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, "Failed to wait for CIDRs to update")
+
+        # try to wait for enableDnsSupport and enableDnsHostnames to match
+        wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
+        wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)
+
+        final_state =
camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id)) + final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', [])) + final_state['id'] = final_state.pop('vpc_id') + + module.exit_json(changed=changed, vpc=final_state) + + elif state == 'absent': + + # Check if VPC exists + vpc_id = vpc_exists(module, connection, name, cidr_block, multi) + + if vpc_id is not None: + try: + if not module.check_mode: + connection.delete_vpc(VpcId=vpc_id) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " + "and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id)) + + module.exit_json(changed=changed, vpc={}) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/ec2_vpc_subnet.py b/test/support/integration/plugins/modules/ec2_vpc_subnet.py new file mode 100644 index 00000000..5085e99b --- /dev/null +++ b/test/support/integration/plugins/modules/ec2_vpc_subnet.py @@ -0,0 +1,604 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_subnet +short_description: Manage subnets in AWS virtual private clouds +description: + - Manage subnets in AWS virtual private clouds. +version_added: "2.0" +author: +- Robert Estelle (@erydo) +- Brad Davidson (@brandond) +requirements: [ boto3 ] +options: + az: + description: + - "The availability zone for the subnet." + type: str + cidr: + description: + - "The CIDR block for the subnet. E.g. 192.0.2.0/24." + type: str + required: true + ipv6_cidr: + description: + - "The IPv6 CIDR block for the subnet. The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range." + - "Required if I(assign_instances_ipv6=true)" + version_added: "2.5" + type: str + tags: + description: + - "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed." + aliases: [ 'resource_tags' ] + type: dict + state: + description: + - "Create or remove the subnet." + default: present + choices: [ 'present', 'absent' ] + type: str + vpc_id: + description: + - "VPC ID of the VPC in which to create or delete the subnet." + required: true + type: str + map_public: + description: + - "Specify C(yes) to indicate that instances launched into the subnet should be assigned public IP address by default." + type: bool + default: 'no' + version_added: "2.4" + assign_instances_ipv6: + description: + - "Specify C(yes) to indicate that instances launched into the subnet should be automatically assigned an IPv6 address." + type: bool + default: false + version_added: "2.5" + wait: + description: + - "When I(wait=true) and I(state=present), module will wait for subnet to be in available state before continuing." + type: bool + default: true + version_added: "2.5" + wait_timeout: + description: + - "Number of seconds to wait for subnet to become available I(wait=True)." + default: 300 + version_added: "2.5" + type: int + purge_tags: + description: + - Whether or not to remove tags that do not appear in the I(tags) list. 
+ type: bool + default: true + version_added: "2.5" +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create subnet for database servers + ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.0.1.16/28 + tags: + Name: Database Subnet + register: database_subnet + +- name: Remove subnet for database servers + ec2_vpc_subnet: + state: absent + vpc_id: vpc-123456 + cidr: 10.0.1.16/28 + +- name: Create subnet with IPv6 block assigned + ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.1.100.0/24 + ipv6_cidr: 2001:db8:0:102::/64 + +- name: Remove IPv6 block assigned to subnet + ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.1.100.0/24 + ipv6_cidr: '' +''' + +RETURN = ''' +subnet: + description: Dictionary of subnet values + returned: I(state=present) + type: complex + contains: + id: + description: Subnet resource id + returned: I(state=present) + type: str + sample: subnet-b883b2c4 + cidr_block: + description: The IPv4 CIDR of the Subnet + returned: I(state=present) + type: str + sample: "10.0.0.0/16" + ipv6_cidr_block: + description: The IPv6 CIDR block actively associated with the Subnet + returned: I(state=present) + type: str + sample: "2001:db8:0:102::/64" + availability_zone: + description: Availability zone of the Subnet + returned: I(state=present) + type: str + sample: us-east-1a + state: + description: state of the Subnet + returned: I(state=present) + type: str + sample: available + tags: + description: tags attached to the Subnet, includes name + returned: I(state=present) + type: dict + sample: {"Name": "My Subnet", "env": "staging"} + map_public_ip_on_launch: + description: whether public IP is auto-assigned to new instances + returned: I(state=present) + type: bool + sample: false + assign_ipv6_address_on_creation: + description: whether IPv6 address is auto-assigned to new instances + returned: I(state=present) + type: bool + sample: false + vpc_id: + description: the id of the VPC where this Subnet exists + returned: I(state=present) + type: str + sample: vpc-67236184 + available_ip_address_count: + description: number of available IPv4 addresses + returned: I(state=present) + type: str + sample: 251 + default_for_az: + description: indicates whether this is the default Subnet for this Availability Zone + returned: I(state=present) + type: bool + sample: false + ipv6_association_id: + description: The IPv6 association ID for the currently associated CIDR + returned: I(state=present) + type: str + sample: subnet-cidr-assoc-b85c74d2 + ipv6_cidr_block_association_set: + description: An array of IPv6 cidr block association set information. + returned: I(state=present) + type: complex + contains: + association_id: + description: The association ID + returned: always + type: str + ipv6_cidr_block: + description: The IPv6 CIDR block that is associated with the subnet. + returned: always + type: str + ipv6_cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. 
+ returned: always + type: str +''' + + +import time + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils._text import to_text +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.aws.waiters import get_waiter +from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, ansible_dict_to_boto3_tag_list, + camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags, AWSRetry) + + +def get_subnet_info(subnet): + if 'Subnets' in subnet: + return [get_subnet_info(s) for s in subnet['Subnets']] + elif 'Subnet' in subnet: + subnet = camel_dict_to_snake_dict(subnet['Subnet']) + else: + subnet = camel_dict_to_snake_dict(subnet) + + if 'tags' in subnet: + subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags']) + else: + subnet['tags'] = dict() + + if 'subnet_id' in subnet: + subnet['id'] = subnet['subnet_id'] + del subnet['subnet_id'] + + subnet['ipv6_cidr_block'] = '' + subnet['ipv6_association_id'] = '' + ipv6set = subnet.get('ipv6_cidr_block_association_set') + if ipv6set: + for item in ipv6set: + if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'): + subnet['ipv6_cidr_block'] = item['ipv6_cidr_block'] + subnet['ipv6_association_id'] = item['association_id'] + + return subnet + + +@AWSRetry.exponential_backoff() +def describe_subnets_with_backoff(client, **params): + return client.describe_subnets(**params) + + +def waiter_params(module, params, start_time): + if not module.botocore_at_least("1.7.0"): + remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time()) + params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5} + return params + + +def handle_waiter(conn, module, waiter_name, params, start_time): + try: + get_waiter(conn, waiter_name).wait( + **waiter_params(module, params, start_time) + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, "Failed to wait for updates to complete") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "An exception happened while trying to wait for updates") + + +def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, az=None, start_time=None): + wait = module.params['wait'] + wait_timeout = module.params['wait_timeout'] + + params = dict(VpcId=vpc_id, + CidrBlock=cidr) + + if ipv6_cidr: + params['Ipv6CidrBlock'] = ipv6_cidr + + if az: + params['AvailabilityZone'] = az + + try: + subnet = get_subnet_info(conn.create_subnet(**params)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create subnet") + + # Sometimes AWS takes its time to create a subnet and so using + # new subnets's id to do things like create tags results in + # exception. 
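+    # Worked example of the timeout budgeting done by waiter_params() above,
+    # with illustrative numbers: if wait_timeout=300 and 40 seconds have
+    # already elapsed since start_time, the remaining budget is 260 seconds,
+    # so WaiterConfig becomes {'Delay': 5, 'MaxAttempts': 52} (260 // 5).
+    # Successive waits within one module run therefore share a single overall
+    # timeout rather than each waiting up to wait_timeout again.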
+ if wait and subnet.get('state') != 'available': + handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time) + try: + conn.get_waiter('subnet_available').wait( + **waiter_params(module, {'SubnetIds': [subnet['id']]}, start_time) + ) + subnet['state'] = 'available' + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Create subnet action timed out waiting for subnet to become available") + + return subnet + + +def ensure_tags(conn, module, subnet, tags, purge_tags, start_time): + changed = False + + filters = ansible_dict_to_boto3_filter_list({'resource-id': subnet['id'], 'resource-type': 'subnet'}) + try: + cur_tags = conn.describe_tags(Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't describe tags") + + to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags) + + if to_update: + try: + if not module.check_mode: + AWSRetry.exponential_backoff( + catch_extra_error_codes=['InvalidSubnetID.NotFound'] + )(conn.create_tags)( + Resources=[subnet['id']], + Tags=ansible_dict_to_boto3_tag_list(to_update) + ) + + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create tags") + + if to_delete: + try: + if not module.check_mode: + tags_list = [] + for key in to_delete: + tags_list.append({'Key': key}) + + AWSRetry.exponential_backoff( + catch_extra_error_codes=['InvalidSubnetID.NotFound'] + )(conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list) + + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete tags") + + if module.params['wait'] and not module.check_mode: + # Wait for tags to be updated + filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()] + handle_waiter(conn, module, 'subnet_exists', + {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + + return changed + + +def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time): + if check_mode: + return + try: + conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't modify subnet attribute") + + +def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time): + if check_mode: + return + try: + conn.modify_subnet_attribute(SubnetId=subnet['id'], AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't modify subnet attribute") + + +def disassociate_ipv6_cidr(conn, module, subnet, start_time): + if subnet.get('assign_ipv6_address_on_creation'): + ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time) + + try: + conn.disassociate_subnet_cidr_block(AssociationId=subnet['ipv6_association_id']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}" + .format(subnet['ipv6_association_id'], subnet['id'])) + + # Wait for cidr block to be disassociated + if module.params['wait']: + filters = 
ansible_dict_to_boto3_filter_list( + {'ipv6-cidr-block-association.state': ['disassociated'], + 'vpc-id': subnet['vpc_id']} + ) + handle_waiter(conn, module, 'subnet_exists', + {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + + +def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time): + wait = module.params['wait'] + changed = False + + if subnet['ipv6_association_id'] and not ipv6_cidr: + if not check_mode: + disassociate_ipv6_cidr(conn, module, subnet, start_time) + changed = True + + if ipv6_cidr: + filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr, + 'vpc-id': subnet['vpc_id']}) + + try: + check_subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get subnet info") + + if check_subnets and check_subnets[0]['ipv6_cidr_block']: + module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr)) + + if subnet['ipv6_association_id']: + if not check_mode: + disassociate_ipv6_cidr(conn, module, subnet, start_time) + changed = True + + try: + if not check_mode: + associate_resp = conn.associate_subnet_cidr_block(SubnetId=subnet['id'], Ipv6CidrBlock=ipv6_cidr) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id'])) + else: + if not check_mode and wait: + filters = ansible_dict_to_boto3_filter_list( + {'ipv6-cidr-block-association.state': ['associated'], + 'vpc-id': subnet['vpc_id']} + ) + handle_waiter(conn, module, 'subnet_exists', + {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + + if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'): + subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId'] + subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'] + if subnet['ipv6_cidr_block_association_set']: + subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']) + else: + subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])) + + return changed + + +def get_matching_subnet(conn, module, vpc_id, cidr): + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr}) + try: + subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get matching subnet") + + if subnets: + return subnets[0] + + return None + + +def ensure_subnet_present(conn, module): + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + changed = False + + # Initialize start so max time does not exceed the specified wait_timeout for multiple operations + start_time = time.time() + + if subnet is None: + if not module.check_mode: + subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'], + ipv6_cidr=module.params['ipv6_cidr'], az=module.params['az'], start_time=start_time) + changed = True + # Subnet will be None when check_mode is true + if subnet is None: + return { + 'changed': changed, + 'subnet': {} + } + if module.params['wait']: + handle_waiter(conn, module, 
'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time) + + if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'): + if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time): + changed = True + + if module.params['map_public'] != subnet['map_public_ip_on_launch']: + ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time) + changed = True + + if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'): + ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time) + changed = True + + if module.params['tags'] != subnet['tags']: + stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items()) + if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time): + changed = True + + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + if not module.check_mode and module.params['wait']: + # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation + # so we only wait for those if necessary just before returning the subnet + subnet = ensure_final_subnet(conn, module, subnet, start_time) + + return { + 'changed': changed, + 'subnet': subnet + } + + +def ensure_final_subnet(conn, module, subnet, start_time): + for rewait in range(0, 30): + map_public_correct = False + assign_ipv6_correct = False + + if module.params['map_public'] == subnet['map_public_ip_on_launch']: + map_public_correct = True + else: + if module.params['map_public']: + handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time) + else: + handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time) + + if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'): + assign_ipv6_correct = True + else: + if module.params['assign_instances_ipv6']: + handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + else: + handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + + if map_public_correct and assign_ipv6_correct: + break + + time.sleep(5) + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + + return subnet + + +def ensure_subnet_absent(conn, module): + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + if subnet is None: + return {'changed': False} + + try: + if not module.check_mode: + conn.delete_subnet(SubnetId=subnet['id']) + if module.params['wait']: + handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time()) + return {'changed': True} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete subnet") + + +def main(): + argument_spec = dict( + az=dict(default=None, required=False), + cidr=dict(required=True), + ipv6_cidr=dict(default='', required=False), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']), + vpc_id=dict(required=True), + map_public=dict(default=False, required=False, type='bool'), + assign_instances_ipv6=dict(default=False, required=False, type='bool'), + wait=dict(type='bool', default=True), + 
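+        # Per the wait_timeout documentation above, a custom timeout is only
+        # honored with botocore >= 1.7.0; main() warns on older versions.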
wait_timeout=dict(type='int', default=300, required=False), + purge_tags=dict(default=True, type='bool') + ) + + required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])] + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if) + + if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'): + module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string") + + if not module.botocore_at_least("1.7.0"): + module.warn("botocore >= 1.7.0 is required to use wait_timeout for custom wait times") + + connection = module.client('ec2') + + state = module.params.get('state') + + try: + if state == 'present': + result = ensure_subnet_present(connection, module) + elif state == 'absent': + result = ensure_subnet_absent(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/flatpak_remote.py b/test/support/integration/plugins/modules/flatpak_remote.py new file mode 100644 index 00000000..db208f1b --- /dev/null +++ b/test/support/integration/plugins/modules/flatpak_remote.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com> +# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net> +# Copyright: (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +# ATTENTION CONTRIBUTORS! +# +# TL;DR: Run this module's integration tests manually before opening a pull request +# +# Long explanation: +# The integration tests for this module are currently NOT run on the Ansible project's continuous +# delivery pipeline. So please: When you make changes to this module, make sure that you run the +# included integration tests manually for both Python 2 and Python 3: +# +# Python 2: +# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak_remote +# Python 3: +# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak_remote +# +# Because of external dependencies, the current integration tests are somewhat too slow and brittle +# to be included right now. I have plans to rewrite the integration tests based on a local flatpak +# repository so that they can be included into the normal CI pipeline. +# //oolongbrothers + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: flatpak_remote +version_added: '2.6' +short_description: Manage flatpak repository remotes +description: +- Allows users to add or remove flatpak remotes. +- The flatpak remotes concept is comparable to what is called repositories in other packaging + formats. +- Currently, remote addition is only supported via I(flatpakrepo) file URLs. +- Existing remotes will not be updated. +- See the M(flatpak) module for managing flatpaks. +author: +- John Kwiatkoski (@JayKayy) +- Alexander Bethke (@oolongbrothers) +requirements: +- flatpak +options: + executable: + description: + - The path to the C(flatpak) executable to use. + - By default, this module looks for the C(flatpak) executable on the path. 
+ default: flatpak + flatpakrepo_url: + description: + - The URL to the I(flatpakrepo) file representing the repository remote to add. + - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url) + is added using the specified installation C(method). + - When used with I(state=absent), this is not required. + - Required when I(state=present). + method: + description: + - The installation method to use. + - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system) + or only for the current C(user). + choices: [ system, user ] + default: system + name: + description: + - The desired name for the flatpak remote to be registered under on the managed host. + - When used with I(state=present), the remote will be added to the managed host under + the specified I(name). + - When used with I(state=absent) the remote with that name will be removed. + required: true + state: + description: + - Indicates the desired package state. + choices: [ absent, present ] + default: present +''' + +EXAMPLES = r''' +- name: Add the Gnome flatpak remote to the system installation + flatpak_remote: + name: gnome + state: present + flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo + +- name: Add the flathub flatpak repository remote to the user installation + flatpak_remote: + name: flathub + state: present + flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo + method: user + +- name: Remove the Gnome flatpak remote from the user installation + flatpak_remote: + name: gnome + state: absent + method: user + +- name: Remove the flathub remote from the system installation + flatpak_remote: + name: flathub + state: absent +''' + +RETURN = r''' +command: + description: The exact flatpak command that was executed + returned: When a flatpak command has been executed + type: str + sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo" +msg: + description: Module error message + returned: failure + type: str + sample: "Executable '/usr/local/bin/flatpak' was not found on the system." 
+rc: + description: Return code from flatpak binary + returned: When a flatpak command has been executed + type: int + sample: 0 +stderr: + description: Error output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n" +stdout: + description: Output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n" +''' + +import subprocess +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes, to_native + + +def add_remote(module, binary, name, flatpakrepo_url, method): + """Add a new remote.""" + global result + command = "{0} remote-add --{1} {2} {3}".format( + binary, method, name, flatpakrepo_url) + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remove_remote(module, binary, name, method): + """Remove an existing remote.""" + global result + command = "{0} remote-delete --{1} --force {2} ".format( + binary, method, name) + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remote_exists(module, binary, name, method): + """Check if the remote exists.""" + command = "{0} remote-list -d --{1}".format(binary, method) + # The query operation for the remote needs to be run even in check mode + output = _flatpak_command(module, False, command) + for line in output.splitlines(): + listed_remote = line.split() + if len(listed_remote) == 0: + continue + if listed_remote[0] == to_native(name): + return True + return False + + +def _flatpak_command(module, noop, command): + global result + if noop: + result['rc'] = 0 + result['command'] = command + return "" + + process = subprocess.Popen( + command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout_data, stderr_data = process.communicate() + result['rc'] = process.returncode + result['command'] = command + result['stdout'] = stdout_data + result['stderr'] = stderr_data + if result['rc'] != 0: + module.fail_json(msg="Failed to execute flatpak command", **result) + return to_native(stdout_data) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + flatpakrepo_url=dict(type='str'), + method=dict(type='str', default='system', + choices=['user', 'system']), + state=dict(type='str', default="present", + choices=['absent', 'present']), + executable=dict(type='str', default="flatpak") + ), + # This module supports check mode + supports_check_mode=True, + ) + + name = module.params['name'] + flatpakrepo_url = module.params['flatpakrepo_url'] + method = module.params['method'] + state = module.params['state'] + executable = module.params['executable'] + binary = module.get_bin_path(executable, None) + + if flatpakrepo_url is None: + flatpakrepo_url = '' + + global result + result = dict( + changed=False + ) + + # If the binary was not found, fail the operation + if not binary: + module.fail_json(msg="Executable '%s' was not found on the system." 
% executable, **result) + + remote_already_exists = remote_exists(module, binary, to_bytes(name), method) + + if state == 'present' and not remote_already_exists: + add_remote(module, binary, name, flatpakrepo_url, method) + elif state == 'absent' and remote_already_exists: + remove_remote(module, binary, name, method) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/htpasswd.py b/test/support/integration/plugins/modules/htpasswd.py new file mode 100644 index 00000000..ad12b0c0 --- /dev/null +++ b/test/support/integration/plugins/modules/htpasswd.py @@ -0,0 +1,275 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Nimbis Services, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +module: htpasswd +version_added: "1.3" +short_description: manage user files for basic authentication +description: + - Add and remove username/password entries in a password file using htpasswd. + - This is used by web servers such as Apache and Nginx for basic authentication. +options: + path: + required: true + aliases: [ dest, destfile ] + description: + - Path to the file that contains the usernames and passwords + name: + required: true + aliases: [ username ] + description: + - User name to add or remove + password: + required: false + description: + - Password associated with user. + - Must be specified if user does not exist yet. + crypt_scheme: + required: false + choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + default: "apr_md5_crypt" + description: + - Encryption scheme to be used. As well as the four choices listed + here, you can also use any other hash supported by passlib, such as + md5_crypt and sha256_crypt, which are linux passwd hashes. If you + do so the password file will not be compatible with Apache or Nginx + state: + required: false + choices: [ present, absent ] + default: "present" + description: + - Whether the user entry should be present or not + create: + required: false + type: bool + default: "yes" + description: + - Used with C(state=present). If specified, the file will be created + if it does not already exist. If set to "no", will fail if the + file does not exist +notes: + - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems." + - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." + - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." 
+requirements: [ passlib>=1.6 ] +author: "Ansible Core Team" +extends_documentation_fragment: files +""" + +EXAMPLES = """ +# Add a user to a password file and ensure permissions are set +- htpasswd: + path: /etc/nginx/passwdfile + name: janedoe + password: '9s36?;fyNp' + owner: root + group: www-data + mode: 0640 + +# Remove a user from a password file +- htpasswd: + path: /etc/apache2/passwdfile + name: foobar + state: absent + +# Add a user to a password file suitable for use by libpam-pwdfile +- htpasswd: + path: /etc/mail/passwords + name: alex + password: oedu2eGh + crypt_scheme: md5_crypt +""" + + +import os +import tempfile +import traceback +from distutils.version import LooseVersion +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +PASSLIB_IMP_ERR = None +try: + from passlib.apache import HtpasswdFile, htpasswd_context + from passlib.context import CryptContext + import passlib +except ImportError: + PASSLIB_IMP_ERR = traceback.format_exc() + passlib_installed = False +else: + passlib_installed = True + +apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + + +def create_missing_directories(dest): + destpath = os.path.dirname(dest) + if not os.path.exists(destpath): + os.makedirs(destpath) + + +def present(dest, username, password, crypt_scheme, create, check_mode): + """ Ensures user is present + + Returns (msg, changed) """ + if crypt_scheme in apache_hashes: + context = htpasswd_context + else: + context = CryptContext(schemes=[crypt_scheme] + apache_hashes) + if not os.path.exists(dest): + if not create: + raise ValueError('Destination %s does not exist' % dest) + if check_mode: + return ("Create %s" % dest, True) + create_missing_directories(dest) + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context) + else: + ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context) + if getattr(ht, 'set_password', None): + ht.set_password(username, password) + else: + ht.update(username, password) + ht.save() + return ("Created %s and added %s" % (dest, username), True) + else: + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context) + else: + ht = HtpasswdFile(dest, default=crypt_scheme, context=context) + + found = None + if getattr(ht, 'check_password', None): + found = ht.check_password(username, password) + else: + found = ht.verify(username, password) + + if found: + return ("%s already present" % username, False) + else: + if not check_mode: + if getattr(ht, 'set_password', None): + ht.set_password(username, password) + else: + ht.update(username, password) + ht.save() + return ("Add/update %s" % username, True) + + +def absent(dest, username, check_mode): + """ Ensures user is absent + + Returns (msg, changed) """ + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=False) + else: + ht = HtpasswdFile(dest) + + if username not in ht.users(): + return ("%s not present" % username, False) + else: + if not check_mode: + ht.delete(username) + ht.save() + return ("Remove %s" % username, True) + + +def check_file_attrs(module, changed, message): + + file_args = module.load_file_common_arguments(module.params) + if module.set_fs_attributes_if_different(file_args, False): + + if changed: + message += " and " + changed = True + message += "ownership, perms 
or SE linux context changed" + + return message, changed + + +def main(): + arg_spec = dict( + path=dict(required=True, aliases=["dest", "destfile"]), + name=dict(required=True, aliases=["username"]), + password=dict(required=False, default=None, no_log=True), + crypt_scheme=dict(required=False, default="apr_md5_crypt"), + state=dict(required=False, default="present"), + create=dict(type='bool', default='yes'), + + ) + module = AnsibleModule(argument_spec=arg_spec, + add_file_common_args=True, + supports_check_mode=True) + + path = module.params['path'] + username = module.params['name'] + password = module.params['password'] + crypt_scheme = module.params['crypt_scheme'] + state = module.params['state'] + create = module.params['create'] + check_mode = module.check_mode + + if not passlib_installed: + module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR) + + # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. + try: + f = open(path, "r") + except IOError: + # No preexisting file to remove blank lines from + f = None + else: + try: + lines = f.readlines() + finally: + f.close() + + # If the file gets edited, it returns true, so only edit the file if it has blank lines + strip = False + for line in lines: + if not line.strip(): + strip = True + break + + if strip: + # If check mode, create a temporary file + if check_mode: + temp = tempfile.NamedTemporaryFile() + path = temp.name + f = open(path, "w") + try: + [f.write(line) for line in lines if line.strip()] + finally: + f.close() + + try: + if state == 'present': + (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) + elif state == 'absent': + if not os.path.exists(path): + module.exit_json(msg="%s not present" % username, + warnings="%s does not exist" % path, changed=False) + (msg, changed) = absent(path, username, check_mode) + else: + module.fail_json(msg="Invalid state: %s" % state) + + check_file_attrs(module, changed, msg) + module.exit_json(msg=msg, changed=changed) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/locale_gen.py b/test/support/integration/plugins/modules/locale_gen.py new file mode 100644 index 00000000..4968b834 --- /dev/null +++ b/test/support/integration/plugins/modules/locale_gen.py @@ -0,0 +1,237 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: locale_gen +short_description: Creates or removes locales +description: + - Manages locales by editing /etc/locale.gen and invoking locale-gen. +version_added: "1.6" +author: +- Augustus Kling (@AugustusKling) +options: + name: + description: + - Name and encoding of the locale, such as "en_GB.UTF-8". + required: true + state: + description: + - Whether the locale shall be present. 
+ choices: [ absent, present ] + default: present +''' + +EXAMPLES = ''' +- name: Ensure a locale exists + locale_gen: + name: de_CH.UTF-8 + state: present +''' + +import os +import re +from subprocess import Popen, PIPE, call + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + +LOCALE_NORMALIZATION = { + ".utf8": ".UTF-8", + ".eucjp": ".EUC-JP", + ".iso885915": ".ISO-8859-15", + ".cp1251": ".CP1251", + ".koi8r": ".KOI8-R", + ".armscii8": ".ARMSCII-8", + ".euckr": ".EUC-KR", + ".gbk": ".GBK", + ".gb18030": ".GB18030", + ".euctw": ".EUC-TW", +} + + +# =========================================== +# location module specific support methods. +# + +def is_available(name, ubuntuMode): + """Check if the given locale is available on the system. This is done by + checking either : + * if the locale is present in /etc/locales.gen + * or if the locale is present in /usr/share/i18n/SUPPORTED""" + if ubuntuMode: + __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$' + __locales_available = '/usr/share/i18n/SUPPORTED' + else: + __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$' + __locales_available = '/etc/locale.gen' + + re_compiled = re.compile(__regexp) + fd = open(__locales_available, 'r') + for line in fd: + result = re_compiled.match(line) + if result and result.group('locale') == name: + return True + fd.close() + return False + + +def is_present(name): + """Checks if the given locale is currently installed.""" + output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0] + output = to_native(output) + return any(fix_case(name) == fix_case(line) for line in output.splitlines()) + + +def fix_case(name): + """locale -a might return the encoding in either lower or upper case. + Passing through this function makes them uniform for comparisons.""" + for s, r in LOCALE_NORMALIZATION.items(): + name = name.replace(s, r) + return name + + +def replace_line(existing_line, new_line): + """Replaces lines in /etc/locale.gen""" + try: + f = open("/etc/locale.gen", "r") + lines = [line.replace(existing_line, new_line) for line in f] + finally: + f.close() + try: + f = open("/etc/locale.gen", "w") + f.write("".join(lines)) + finally: + f.close() + + +def set_locale(name, enabled=True): + """ Sets the state of the locale. Defaults to enabled. """ + search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name + if enabled: + new_string = r'%s \g<charset>' % (name) + else: + new_string = r'# %s \g<charset>' % (name) + try: + f = open("/etc/locale.gen", "r") + lines = [re.sub(search_string, new_string, line) for line in f] + finally: + f.close() + try: + f = open("/etc/locale.gen", "w") + f.write("".join(lines)) + finally: + f.close() + + +def apply_change(targetState, name): + """Create or remove locale. + + Keyword arguments: + targetState -- Desired state, either present or absent. + name -- Name including encoding such as de_CH.UTF-8. + """ + if targetState == "present": + # Create locale. + set_locale(name, enabled=True) + else: + # Delete locale. + set_locale(name, enabled=False) + + localeGenExitValue = call("locale-gen") + if localeGenExitValue != 0: + raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue)) + + +def apply_change_ubuntu(targetState, name): + """Create or remove locale. + + Keyword arguments: + targetState -- Desired state, either present or absent. + name -- Name including encoding such as de_CH.UTF-8. + """ + if targetState == "present": + # Create locale. 
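+        # The file mentioned below stores one "<locale> <charset>" pair per
+        # line, e.g. "de_CH.UTF-8 UTF-8" (illustrative entry); the removal
+        # branch relies on that layout via line.split(' ').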
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local + localeGenExitValue = call(["locale-gen", name]) + else: + # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales. + try: + f = open("/var/lib/locales/supported.d/local", "r") + content = f.readlines() + finally: + f.close() + try: + f = open("/var/lib/locales/supported.d/local", "w") + for line in content: + locale, charset = line.split(' ') + if locale != name: + f.write(line) + finally: + f.close() + # Purge locales and regenerate. + # Please provide a patch if you know how to avoid regenerating the locales to keep! + localeGenExitValue = call(["locale-gen", "--purge"]) + + if localeGenExitValue != 0: + raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + ) + + name = module.params['name'] + state = module.params['state'] + + if not os.path.exists("/etc/locale.gen"): + if os.path.exists("/var/lib/locales/supported.d/"): + # Ubuntu created its own system to manage locales. + ubuntuMode = True + else: + module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?") + else: + # We found the common way to manage locales. + ubuntuMode = False + + if not is_available(name, ubuntuMode): + module.fail_json(msg="The locale you've entered is not available " + "on your system.") + + if is_present(name): + prev_state = "present" + else: + prev_state = "absent" + changed = (prev_state != state) + + if module.check_mode: + module.exit_json(changed=changed) + else: + if changed: + try: + if ubuntuMode is False: + apply_change(state, name) + else: + apply_change_ubuntu(state, name) + except EnvironmentError as e: + module.fail_json(msg=to_native(e), exitValue=e.errno) + + module.exit_json(name=name, changed=changed, msg="OK") + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/lvg.py b/test/support/integration/plugins/modules/lvg.py new file mode 100644 index 00000000..e2035f68 --- /dev/null +++ b/test/support/integration/plugins/modules/lvg.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com> +# Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +author: +- Alexander Bulimov (@abulimov) +module: lvg +short_description: Configure LVM volume groups +description: + - This module creates, removes or resizes volume groups. +version_added: "1.1" +options: + vg: + description: + - The name of the volume group. + type: str + required: true + pvs: + description: + - List of comma-separated devices to use as physical devices in this volume group. + - Required when creating or resizing volume group. + - The module will take care of running pvcreate if needed. + type: list + pesize: + description: + - "The size of the physical extent. 
I(pesize) must be a power of 2 of at least 1 sector + (where the sector size is the largest sector size of the PVs currently used in the VG), + or at least 128KiB." + - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte. + type: str + default: "4" + pv_options: + description: + - Additional options to pass to C(pvcreate) when creating the volume group. + type: str + version_added: "2.4" + vg_options: + description: + - Additional options to pass to C(vgcreate) when creating the volume group. + type: str + version_added: "1.6" + state: + description: + - Control if the volume group exists. + type: str + choices: [ absent, present ] + default: present + force: + description: + - If C(yes), allows to remove volume group with logical volumes. + type: bool + default: no +seealso: +- module: filesystem +- module: lvol +- module: parted +notes: + - This module does not modify PE size for already present volume group. +''' + +EXAMPLES = r''' +- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB + lvg: + vg: vg.services + pvs: /dev/sda1 + pesize: 32 + +- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB + lvg: + vg: vg.services + pvs: /dev/sdb + pesize: 128K + +# If, for example, we already have VG vg.services on top of /dev/sdb1, +# this VG will be extended by /dev/sdc5. Or if vg.services was created on +# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, +# and then reduce by /dev/sda5. +- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. + lvg: + vg: vg.services + pvs: /dev/sdb1,/dev/sdc5 + +- name: Remove a volume group with name vg.services + lvg: + vg: vg.services + state: absent +''' + +import itertools +import os + +from ansible.module_utils.basic import AnsibleModule + + +def parse_vgs(data): + vgs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + vgs.append({ + 'name': parts[0], + 'pv_count': int(parts[1]), + 'lv_count': int(parts[2]), + }) + return vgs + + +def find_mapper_device_name(module, dm_device): + dmsetup_cmd = module.get_bin_path('dmsetup', True) + mapper_prefix = '/dev/mapper/' + rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device)) + if rc != 0: + module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) + mapper_device = mapper_prefix + dm_name.rstrip() + return mapper_device + + +def parse_pvs(module, data): + pvs = [] + dm_prefix = '/dev/dm-' + for line in data.splitlines(): + parts = line.strip().split(';') + if parts[0].startswith(dm_prefix): + parts[0] = find_mapper_device_name(module, parts[0]) + pvs.append({ + 'name': parts[0], + 'vg_name': parts[1], + }) + return pvs + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + pvs=dict(type='list'), + pesize=dict(type='str', default='4'), + pv_options=dict(type='str', default=''), + vg_options=dict(type='str', default=''), + state=dict(type='str', default='present', choices=['absent', 'present']), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + vg = module.params['vg'] + state = module.params['state'] + force = module.boolean(module.params['force']) + pesize = module.params['pesize'] + pvoptions = module.params['pv_options'].split() + vgoptions = module.params['vg_options'].split() + + dev_list = [] + if module.params['pvs']: + dev_list = list(module.params['pvs']) + elif state == 'present': + 
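+        # Per the DOCUMENTATION above, pvs is required only when the volume
+        # group may need to be created or resized; state=absent proceeds
+        # without it.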
module.fail_json(msg="No physical volumes given.") + + # LVM always uses real paths not symlinks so replace symlinks with actual path + for idx, dev in enumerate(dev_list): + dev_list[idx] = os.path.realpath(dev) + + if state == 'present': + # check given devices + for test_dev in dev_list: + if not os.path.exists(test_dev): + module.fail_json(msg="Device %s not found." % test_dev) + + # get pv list + pvs_cmd = module.get_bin_path('pvs', True) + if dev_list: + pvs_filter_pv_name = ' || '.join( + 'pv_name = {0}'.format(x) + for x in itertools.chain(dev_list, module.params['pvs']) + ) + pvs_filter_vg_name = 'vg_name = {0}'.format(vg) + pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name) + else: + pvs_filter = '' + rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter)) + if rc != 0: + module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err) + + # check pv for devices + pvs = parse_pvs(module, current_pvs) + used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg] + if used_pvs: + module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name'])) + + vgs_cmd = module.get_bin_path('vgs', True) + rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd) + + if rc != 0: + module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err) + + changed = False + + vgs = parse_vgs(current_vgs) + + for test_vg in vgs: + if test_vg['name'] == vg: + this_vg = test_vg + break + else: + this_vg = None + + if this_vg is None: + if state == 'present': + # create VG + if module.check_mode: + changed = True + else: + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in dev_list: + rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) + vgcreate_cmd = module.get_bin_path('vgcreate') + rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err) + else: + if state == 'absent': + if module.check_mode: + module.exit_json(changed=True) + else: + if this_vg['lv_count'] == 0 or force: + # remove VG + vgremove_cmd = module.get_bin_path('vgremove', True) + rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) + if rc == 0: + module.exit_json(changed=True) + else: + module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err) + else: + module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg)) + + # resize VG + current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg] + devs_to_remove = list(set(current_devs) - set(dev_list)) + devs_to_add = list(set(dev_list) - set(current_devs)) + + if devs_to_add or devs_to_remove: + if module.check_mode: + changed = True + else: + if devs_to_add: + devs_to_add_string = ' '.join(devs_to_add) + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in devs_to_add: + rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % 
current_dev, rc=rc, err=err) + # add PV to our VG + vgextend_cmd = module.get_bin_path('vgextend', True) + rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) + if rc == 0: + changed = True + else: + module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err) + + # remove some PV from our VG + if devs_to_remove: + devs_to_remove_string = ' '.join(devs_to_remove) + vgreduce_cmd = module.get_bin_path('vgreduce', True) + rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) + if rc == 0: + changed = True + else: + module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/mongodb_parameter.py b/test/support/integration/plugins/modules/mongodb_parameter.py new file mode 100644 index 00000000..05de42b2 --- /dev/null +++ b/test/support/integration/plugins/modules/mongodb_parameter.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Loic Blot <loic.blot@unix-experience.fr> +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. http://www.etai.fr/ +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: mongodb_parameter +short_description: Change an administrative parameter on a MongoDB server +description: + - Change an administrative parameter on a MongoDB server. +version_added: "2.1" +options: + login_user: + description: + - The MongoDB username used to authenticate with. + type: str + login_password: + description: + - The login user's password used to authenticate with. + type: str + login_host: + description: + - The host running the database. + type: str + default: localhost + login_port: + description: + - The MongoDB port to connect to. + default: 27017 + type: int + login_database: + description: + - The database where login credentials are stored. + type: str + replica_set: + description: + - Replica set to connect to (automatically connects to primary for writes). + type: str + ssl: + description: + - Whether to use an SSL connection when connecting to the database. + type: bool + default: no + param: + description: + - MongoDB administrative parameter to modify. + type: str + required: true + value: + description: + - MongoDB administrative parameter value to set. + type: str + required: true + param_type: + description: + - Define the type of parameter value. + default: str + type: str + choices: [int, str] + +notes: + - Requires the pymongo Python package on the remote host, version 2.4.2+. + - This can be installed using pip or the OS package manager. 
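+# If login_user and login_password are omitted, credentials are read from
+# ~/.mongodb.cnf (see load_mongocnf() below), an INI file of the form
+# (illustrative values):
+#   [client]
+#   user = admin
+#   pass = secret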
+ - See also U(http://api.mongodb.org/python/current/installation.html) +requirements: [ "pymongo" ] +author: "Loic Blot (@nerzhul)" +''' + +EXAMPLES = r''' +- name: Set MongoDB syncdelay to 60 (this is an int) + mongodb_parameter: + param: syncdelay + value: 60 + param_type: int +''' + +RETURN = r''' +before: + description: value before modification + returned: success + type: str +after: + description: value after modification + returned: success + type: str +''' + +import os +import traceback + +try: + from pymongo.errors import ConnectionFailure + from pymongo.errors import OperationFailure + from pymongo import version as PyMongoVersion + from pymongo import MongoClient +except ImportError: + try: # for older PyMongo 2.2 + from pymongo import Connection as MongoClient + except ImportError: + pymongo_found = False + else: + pymongo_found = True +else: + pymongo_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six.moves import configparser +from ansible.module_utils._text import to_native + + +# ========================================= +# MongoDB module specific support methods. +# + +def load_mongocnf(): + config = configparser.RawConfigParser() + mongocnf = os.path.expanduser('~/.mongodb.cnf') + + try: + config.readfp(open(mongocnf)) + creds = dict( + user=config.get('client', 'user'), + password=config.get('client', 'pass') + ) + except (configparser.NoOptionError, IOError): + return False + + return creds + + +# ========================================= +# Module execution. +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None), + login_password=dict(default=None, no_log=True), + login_host=dict(default='localhost'), + login_port=dict(default=27017, type='int'), + login_database=dict(default=None), + replica_set=dict(default=None), + param=dict(required=True), + value=dict(required=True), + param_type=dict(default="str", choices=['str', 'int']), + ssl=dict(default=False, type='bool'), + ) + ) + + if not pymongo_found: + module.fail_json(msg=missing_required_lib('pymongo')) + + login_user = module.params['login_user'] + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + login_database = module.params['login_database'] + + replica_set = module.params['replica_set'] + ssl = module.params['ssl'] + + param = module.params['param'] + param_type = module.params['param_type'] + value = module.params['value'] + + # Verify parameter is coherent with specified type + try: + if param_type == 'int': + value = int(value) + except ValueError: + module.fail_json(msg="value '%s' is not %s" % (value, param_type)) + + try: + if replica_set: + client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl) + else: + client = MongoClient(login_host, int(login_port), ssl=ssl) + + if login_user is None and login_password is None: + mongocnf_creds = load_mongocnf() + if mongocnf_creds is not False: + login_user = mongocnf_creds['user'] + login_password = mongocnf_creds['password'] + elif login_password is None or login_user is None: + module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') + + if login_user is not None and login_password is not None: + client.admin.authenticate(login_user, login_password, source=login_database) + + except ConnectionFailure as e: + module.fail_json(msg='unable to connect to database: %s' % to_native(e), 
exception=traceback.format_exc()) + + db = client.admin + + try: + after_value = db.command("setParameter", **{param: value}) + except OperationFailure as e: + module.fail_json(msg="unable to change parameter: %s" % to_native(e), exception=traceback.format_exc()) + + if "was" not in after_value: + module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.") + else: + module.exit_json(changed=(value != after_value["was"]), before=after_value["was"], + after=value) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/mongodb_user.py b/test/support/integration/plugins/modules/mongodb_user.py new file mode 100644 index 00000000..362b3aa4 --- /dev/null +++ b/test/support/integration/plugins/modules/mongodb_user.py @@ -0,0 +1,474 @@ +#!/usr/bin/python + +# (c) 2012, Elliott Foster <elliott@fourkitchens.com> +# Sponsored by Four Kitchens http://fourkitchens.com. +# (c) 2014, Epic Games, Inc. +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: mongodb_user +short_description: Adds or removes a user from a MongoDB database +description: + - Adds or removes a user from a MongoDB database. +version_added: "1.1" +options: + login_user: + description: + - The MongoDB username used to authenticate with. + type: str + login_password: + description: + - The login user's password used to authenticate with. + type: str + login_host: + description: + - The host running the database. + default: localhost + type: str + login_port: + description: + - The MongoDB port to connect to. + default: '27017' + type: str + login_database: + version_added: "2.0" + description: + - The database where login credentials are stored. + type: str + replica_set: + version_added: "1.6" + description: + - Replica set to connect to (automatically connects to primary for writes). + type: str + database: + description: + - The name of the database to add/remove the user from. + required: true + type: str + aliases: [db] + name: + description: + - The name of the user to add or remove. + required: true + aliases: [user] + type: str + password: + description: + - The password to use for the user. + type: str + aliases: [pass] + ssl: + version_added: "1.8" + description: + - Whether to use an SSL connection when connecting to the database. + type: bool + ssl_cert_reqs: + version_added: "2.2" + description: + - Specifies whether a certificate is required from the other side of the connection, + and whether it will be validated if provided. + default: CERT_REQUIRED + choices: [CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED] + type: str + roles: + version_added: "1.3" + type: list + elements: raw + description: + - > + The database user roles valid values could either be one or more of the following strings: + 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', + 'dbAdminAnyDatabase' + - "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'." + - "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required." + state: + description: + - The database user state. 
+ default: present + choices: [absent, present] + type: str + update_password: + default: always + choices: [always, on_create] + version_added: "2.1" + description: + - C(always) will update passwords if they differ. + - C(on_create) will only set the password for newly created users. + type: str + +notes: + - Requires the pymongo Python package on the remote host, version 2.4.2+. This + can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html +requirements: [ "pymongo" ] +author: + - "Elliott Foster (@elliotttf)" + - "Julien Thebault (@Lujeni)" +''' + +EXAMPLES = ''' +- name: Create 'burgers' database user with name 'bob' and password '12345'. + mongodb_user: + database: burgers + name: bob + password: 12345 + state: present + +- name: Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly) + mongodb_user: + database: burgers + name: bob + password: 12345 + state: present + ssl: True + +- name: Delete 'burgers' database user with name 'bob'. + mongodb_user: + database: burgers + name: bob + state: absent + +- name: Define more users with various specific roles (if not defined, no roles is assigned, and the user will be added via pre mongo 2.2 style) + mongodb_user: + database: burgers + name: ben + password: 12345 + roles: read + state: present + +- name: Define roles + mongodb_user: + database: burgers + name: jim + password: 12345 + roles: readWrite,dbAdmin,userAdmin + state: present + +- name: Define roles + mongodb_user: + database: burgers + name: joe + password: 12345 + roles: readWriteAnyDatabase + state: present + +- name: Add a user to database in a replica set, the primary server is automatically discovered and written to + mongodb_user: + database: burgers + name: bob + replica_set: belcher + password: 12345 + roles: readWriteAnyDatabase + state: present + +# add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL). +# please notice the credentials must be added to the 'admin' database because the 'local' database is not synchronized and can't receive user credentials +# To login with such user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin" +# This syntax requires mongodb 2.6+ and pymongo 2.5+ +- name: Roles as a dictionary + mongodb_user: + login_user: root + login_password: root_password + database: admin + user: oplog_reader + password: oplog_reader_password + state: present + replica_set: belcher + roles: + - db: local + role: read + +''' + +RETURN = ''' +user: + description: The name of the user to add or remove. 
+ returned: success + type: str +''' + +import os +import ssl as ssl_lib +import traceback +from distutils.version import LooseVersion +from operator import itemgetter + +try: + from pymongo.errors import ConnectionFailure + from pymongo.errors import OperationFailure + from pymongo import version as PyMongoVersion + from pymongo import MongoClient +except ImportError: + try: # for older PyMongo 2.2 + from pymongo import Connection as MongoClient + except ImportError: + pymongo_found = False + else: + pymongo_found = True +else: + pymongo_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six import binary_type, text_type +from ansible.module_utils.six.moves import configparser +from ansible.module_utils._text import to_native + + +# ========================================= +# MongoDB module specific support methods. +# + +def check_compatibility(module, client): + """Check the compatibility between the driver and the database. + + See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility + + Args: + module: Ansible module. + client (cursor): Mongodb cursor on admin database. + """ + loose_srv_version = LooseVersion(client.server_info()['version']) + loose_driver_version = LooseVersion(PyMongoVersion) + + if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'): + module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)') + + elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'): + module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)') + + elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'): + module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)') + + elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'): + module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)') + + +def user_find(client, user, db_name): + """Check if the user exists. + + Args: + client (cursor): Mongodb cursor on admin database. + user (str): User to check. + db_name (str): User's database. + + Returns: + dict: when user exists, False otherwise. + """ + for mongo_user in client["admin"].system.users.find(): + if mongo_user['user'] == user: + # NOTE: there is no 'db' field in mongo 2.4. 
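+            # Illustrative shape of such a 2.4-era document (assumed, for
+            # orientation only): {"user": "bob", "pwd": "...", "roles": [...]}
+            # with no database attribution, so a match on the name alone is
+            # the best this lookup can do on those servers.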
+            if 'db' not in mongo_user:
+                return mongo_user
+
+            if mongo_user["db"] == db_name:
+                return mongo_user
+    return False
+
+
+def user_add(module, client, db_name, user, password, roles):
+    # pymongo's add_user is a _create_or_update_user, so we can't tell whether
+    # the user was created or updated without reproducing a lot of the logic
+    # in pymongo's database.py
+    db = client[db_name]
+
+    if roles is None:
+        db.add_user(user, password, False)
+    else:
+        db.add_user(user, password, None, roles=roles)
+
+
+def user_remove(module, client, db_name, user):
+    exists = user_find(client, user, db_name)
+    if exists:
+        if module.check_mode:
+            module.exit_json(changed=True, user=user)
+        db = client[db_name]
+        db.remove_user(user)
+    else:
+        module.exit_json(changed=False, user=user)
+
+
+def load_mongocnf():
+    config = configparser.RawConfigParser()
+    mongocnf = os.path.expanduser('~/.mongodb.cnf')
+
+    try:
+        config.readfp(open(mongocnf))
+        creds = dict(
+            user=config.get('client', 'user'),
+            password=config.get('client', 'pass')
+        )
+    except (configparser.NoOptionError, IOError):
+        return False
+
+    return creds
+
+
+def check_if_roles_changed(uinfo, roles, db_name):
+    # We must be aware of users which can read the oplog on a replica set.
+    # Such users must have access to the local DB, but since this DB does not
+    # store user credentials and is not synchronized among replica sets, the
+    # user must be stored on the admin db.
+    # Therefore their structure is the following:
+    # {
+    #     "_id" : "admin.oplog_reader",
+    #     "user" : "oplog_reader",
+    #     "db" : "admin",                    # <-- admin DB
+    #     "roles" : [
+    #         {
+    #             "role" : "read",
+    #             "db" : "local"             # <-- local DB
+    #         }
+    #     ]
+    # }
+
+    def make_sure_roles_are_a_list_of_dict(roles, db_name):
+        output = list()
+        for role in roles:
+            if isinstance(role, (binary_type, text_type)):
+                new_role = {"role": role, "db": db_name}
+                output.append(new_role)
+            else:
+                output.append(role)
+        return output
+
+    roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name)
+    uinfo_roles = uinfo.get('roles', [])
+
+    if sorted(roles_as_list_of_dict, key=itemgetter('db')) == sorted(uinfo_roles, key=itemgetter('db')):
+        return False
+    return True
+
+
+# =========================================
+# Module execution.
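+#
+# Rough control flow of main() below (a reading aid, not executed):
+#   connect -> check_compatibility() -> optional authenticate ->
+#     state == 'present': user_find()/check_if_roles_changed(), then user_add()
+#     state == 'absent':  user_remove()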
+# + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None), + login_password=dict(default=None, no_log=True), + login_host=dict(default='localhost'), + login_port=dict(default='27017'), + login_database=dict(default=None), + replica_set=dict(default=None), + database=dict(required=True, aliases=['db']), + name=dict(required=True, aliases=['user']), + password=dict(aliases=['pass'], no_log=True), + ssl=dict(default=False, type='bool'), + roles=dict(default=None, type='list', elements='raw'), + state=dict(default='present', choices=['absent', 'present']), + update_password=dict(default="always", choices=["always", "on_create"]), + ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']), + ), + supports_check_mode=True + ) + + if not pymongo_found: + module.fail_json(msg=missing_required_lib('pymongo')) + + login_user = module.params['login_user'] + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + login_database = module.params['login_database'] + + replica_set = module.params['replica_set'] + db_name = module.params['database'] + user = module.params['name'] + password = module.params['password'] + ssl = module.params['ssl'] + roles = module.params['roles'] or [] + state = module.params['state'] + update_password = module.params['update_password'] + + try: + connection_params = { + "host": login_host, + "port": int(login_port), + } + + if replica_set: + connection_params["replicaset"] = replica_set + + if ssl: + connection_params["ssl"] = ssl + connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs']) + + client = MongoClient(**connection_params) + + # NOTE: this check must be done ASAP. 
+        # We don't need to be authenticated for it (the ability to run this
+        # check unauthenticated was lost in PyMongo 3.6)
+        if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'):
+            check_compatibility(module, client)
+
+        if login_user is None and login_password is None:
+            mongocnf_creds = load_mongocnf()
+            if mongocnf_creds is not False:
+                login_user = mongocnf_creds['user']
+                login_password = mongocnf_creds['password']
+        elif login_password is None or login_user is None:
+            module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
+
+        if login_user is not None and login_password is not None:
+            client.admin.authenticate(login_user, login_password, source=login_database)
+        elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
+            if db_name != "admin":
+                module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
+            # else: this has to be the first admin user added
+
+    except Exception as e:
+        module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
+
+    if state == 'present':
+        if password is None and update_password == 'always':
+            module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
+
+        try:
+            if update_password != 'always':
+                uinfo = user_find(client, user, db_name)
+                if uinfo:
+                    password = None
+                    if not check_if_roles_changed(uinfo, roles, db_name):
+                        module.exit_json(changed=False, user=user)
+
+            if module.check_mode:
+                module.exit_json(changed=True, user=user)
+
+            user_add(module, client, db_name, user, password, roles)
+        except Exception as e:
+            module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc())
+        finally:
+            try:
+                client.close()
+            except Exception:
+                pass
+            # Here we could check for a password change if MongoDB provided a
+            # query for that: https://jira.mongodb.org/browse/SERVER-22848
+            # newuinfo = user_find(client, user, db_name)
+            # if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
+            #     module.exit_json(changed=False, user=user)
+
+    elif state == 'absent':
+        try:
+            user_remove(module, client, db_name, user)
+        except Exception as e:
+            module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc())
+        finally:
+            try:
+                client.close()
+            except Exception:
+                pass
+        module.exit_json(changed=True, user=user)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/pids.py b/test/support/integration/plugins/modules/pids.py
new file mode 100644
index 00000000..4cbf45a9
--- /dev/null
+++ b/test/support/integration/plugins/modules/pids.py
@@ -0,0 +1,89 @@
+#!/usr/bin/python
+# Copyright: (c) 2019, Saranya Sridharan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: pids
+version_added: 2.8
+description: "Retrieves a list of PIDs for a given process name on Ansible controller/controlled machines. Returns an empty list if no process with that name exists."
+short_description: "Retrieves the list of process IDs if the process is running, otherwise returns an empty list"
+author:
+  - Saranya Sridharan (@saranyasridharan)
+requirements:
+  - psutil (Python module)
+options:
+  name:
+    description: The name of the process you want to get PIDs for.
+ required: true + type: str +''' + +EXAMPLES = ''' +# Pass the process name +- name: Getting process IDs of the process + pids: + name: python + register: pids_of_python + +- name: Printing the process IDs obtained + debug: + msg: "PIDS of python:{{pids_of_python.pids|join(',')}}" +''' + +RETURN = ''' +pids: + description: Process IDs of the given process + returned: list of none, one, or more process IDs + type: list + sample: [100,200] +''' + +from ansible.module_utils.basic import AnsibleModule +try: + import psutil + HAS_PSUTIL = True +except ImportError: + HAS_PSUTIL = False + + +def compare_lower(a, b): + if a is None or b is None: + # this could just be "return False" but would lead to surprising behavior if both a and b are None + return a == b + + return a.lower() == b.lower() + + +def get_pid(name): + pids = [] + + for proc in psutil.process_iter(attrs=['name', 'cmdline']): + if compare_lower(proc.info['name'], name) or \ + proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name): + pids.append(proc.pid) + + return pids + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type="str"), + ), + supports_check_mode=True, + ) + if not HAS_PSUTIL: + module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil") + name = module.params["name"] + response = dict(pids=get_pid(name)) + module.exit_json(**response) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/pkgng.py b/test/support/integration/plugins/modules/pkgng.py new file mode 100644 index 00000000..11363479 --- /dev/null +++ b/test/support/integration/plugins/modules/pkgng.py @@ -0,0 +1,406 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, bleader +# Written by bleader <bleader@ratonland.org> +# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com> +# that was based on pacman module written by Afterburn <https://github.com/afterburn> +# that was based on apt module written by Matthew Williams <matthew@flowroute.com> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pkgng +short_description: Package manager for FreeBSD >= 9.0 +description: + - Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0. +version_added: "1.2" +options: + name: + description: + - Name or list of names of packages to install/remove. + required: true + state: + description: + - State of the package. + - 'Note: "latest" added in 2.7' + choices: [ 'present', 'latest', 'absent' ] + required: false + default: present + cached: + description: + - Use local package base instead of fetching an updated one. + type: bool + required: false + default: no + annotation: + description: + - A comma-separated list of keyvalue-pairs of the form + C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a + C(-) denotes removing an annotation, and C(:) denotes modifying an + annotation. + If setting or modifying annotations, a value must be provided. + required: false + version_added: "1.6" + pkgsite: + description: + - For pkgng versions before 1.1.4, specify packagesite to use + for downloading packages. If not specified, use settings from + C(/usr/local/etc/pkg.conf). 
+      - For newer pkgng versions, specify the name of a repository
+        configured in C(/usr/local/etc/pkg/repos).
+    required: false
+  rootdir:
+    description:
+      - For pkgng versions 1.5 and later, pkg will install all packages
+        within the specified root directory.
+      - Can not be used together with I(chroot) or I(jail) options.
+    required: false
+  chroot:
+    version_added: "2.1"
+    description:
+      - Pkg will chroot in the specified environment.
+      - Can not be used together with I(rootdir) or I(jail) options.
+    required: false
+  jail:
+    version_added: "2.4"
+    description:
+      - Pkg will execute in the given jail name or id.
+      - Can not be used together with I(chroot) or I(rootdir) options.
+  autoremove:
+    version_added: "2.2"
+    description:
+      - Remove automatically installed packages which are no longer needed.
+    required: false
+    type: bool
+    default: no
+author: "bleader (@bleader)"
+notes:
+  - When using pkgsite, be aware that packages already in the local cache
+    won't be downloaded again.
+  - When used with a `loop:`, each package will be processed individually;
+    it is much more efficient to pass the list directly to the `name` option.
+'''

+EXAMPLES = '''
+- name: Install package foo
+  pkgng:
+    name: foo
+    state: present
+
+- name: Annotate package foo and bar
+  pkgng:
+    name: foo,bar
+    annotation: '+test1=baz,-test2,:test3=foobar'
+
+- name: Remove packages foo and bar
+  pkgng:
+    name: foo,bar
+    state: absent
+
+# "latest" support added in 2.7
+- name: Upgrade package baz
+  pkgng:
+    name: baz
+    state: latest
+'''
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, pkgng_path, name, dir_arg):
+
+    rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
+
+    if rc == 0:
+        return True
+
+    return False
+
+
+def query_update(module, pkgng_path, name, dir_arg, old_pkgng, pkgsite):
+
+    # Check to see if a package upgrade is available.
+ # rc = 0, no updates available or package not installed + # rc = 1, updates available + if old_pkgng: + rc, out, err = module.run_command("%s %s upgrade -g -n %s" % (pkgsite, pkgng_path, name)) + else: + rc, out, err = module.run_command("%s %s upgrade %s -g -n %s" % (pkgng_path, dir_arg, pkgsite, name)) + + if rc == 1: + return True + + return False + + +def pkgng_older_than(module, pkgng_path, compare_version): + + rc, out, err = module.run_command("%s -v" % pkgng_path) + version = [int(x) for x in re.split(r'[\._]', out)] + + i = 0 + new_pkgng = True + while compare_version[i] == version[i]: + i += 1 + if i == min(len(compare_version), len(version)): + break + else: + if compare_version[i] > version[i]: + new_pkgng = False + return not new_pkgng + + +def remove_packages(module, pkgng_path, packages, dir_arg): + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, pkgng_path, package, dir_arg): + continue + + if not module.check_mode: + rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package)) + + if not module.check_mode and query_package(module, pkgng_path, package, dir_arg): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + return (True, "removed %s package(s)" % remove_c) + + return (False, "package(s) already absent") + + +def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state): + + install_c = 0 + + # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions + # in /usr/local/etc/pkg/repos + old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4]) + if pkgsite != "": + if old_pkgng: + pkgsite = "PACKAGESITE=%s" % (pkgsite) + else: + pkgsite = "-r %s" % (pkgsite) + + # This environment variable skips mid-install prompts, + # setting them to their default values. 
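+    # Without it, "pkg install" may stop to ask e.g.
+    # "Proceed with this action? [y/N]", which would hang an unattended run
+    # (prompt text illustrative; exact wording depends on the pkg version).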
+ batch_var = 'env BATCH=yes' + + if not module.check_mode and not cached: + if old_pkgng: + rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path)) + else: + rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg)) + if rc != 0: + module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err)) + + for package in packages: + already_installed = query_package(module, pkgng_path, package, dir_arg) + if already_installed and state == "present": + continue + + update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite) + if not update_available and already_installed and state == "latest": + continue + + if not module.check_mode: + if already_installed: + action = "upgrade" + else: + action = "install" + if old_pkgng: + rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package)) + else: + rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package)) + + if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg): + module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stderr=err) + + install_c += 1 + + if install_c > 0: + return (True, "added %s package(s)" % (install_c)) + + return (False, "package(s) already %s" % (state)) + + +def annotation_query(module, pkgng_path, package, tag, dir_arg): + rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package)) + match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE) + if match: + return match.group('value') + return False + + +def annotation_add(module, pkgng_path, package, tag, value, dir_arg): + _value = annotation_query(module, pkgng_path, package, tag, dir_arg) + if not _value: + # Annotation does not exist, add it. 
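+        # Shells out to something like (tag and value here are made up):
+        #   pkg annotate -y -A curl vendor "upstream"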
+        rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
+                                          % (pkgng_path, dir_arg, package, tag, value))
+        if rc != 0:
+            module.fail_json(msg="could not annotate %s: %s"
+                             % (package, out), stderr=err)
+        return True
+    elif _value != value:
+        # Annotation exists, but value differs
+        module.fail_json(
+            msg="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+            % (package, tag, _value, value))
+        return False
+    else:
+        # Annotation exists, nothing to do
+        return False
+
+
+def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
+    _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+    if _value:
+        rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
+                                          % (pkgng_path, dir_arg, package, tag))
+        if rc != 0:
+            module.fail_json(msg="could not delete annotation from %s: %s"
+                             % (package, out), stderr=err)
+        return True
+    return False
+
+
+def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
+    _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+    if not _value:
+        # No such tag
+        module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
+                         % (package, tag))
+    elif _value == value:
+        # No change in value
+        return False
+    else:
+        rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
+                                          % (pkgng_path, dir_arg, package, tag, value))
+        if rc != 0:
+            module.fail_json(msg="could not change annotation to %s: %s"
+                             % (package, out), stderr=err)
+        return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
+    annotate_c = 0
+    # list() matters here: on Python 3 a bare map() is a one-shot iterator
+    # and would be exhausted after the first package in the loop below.
+    annotations = list(map(lambda _annotation:
+                           re.match(r'(?P<operation>[\+\-:])(?P<tag>\w+)(=(?P<value>\w+))?',
+                                    _annotation).groupdict(),
+                           re.split(r',', annotation)))
+
+    operation = {
+        '+': annotation_add,
+        '-': annotation_delete,
+        ':': annotation_modify
+    }
+
+    for package in packages:
+        for _annotation in annotations:
+            if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value']):
+                annotate_c += 1
+
+    if annotate_c > 0:
+        return (True, "added %s annotations."
% annotate_c) + return (False, "changed no annotations") + + +def autoremove_packages(module, pkgng_path, dir_arg): + rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg)) + + autoremove_c = 0 + + match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE) + if match: + autoremove_c = int(match.group(1)) + + if autoremove_c == 0: + return False, "no package(s) to autoremove" + + if not module.check_mode: + rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg)) + + return True, "autoremoved %d package(s)" % (autoremove_c) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "latest", "absent"], required=False), + name=dict(aliases=["pkg"], required=True, type='list'), + cached=dict(default=False, type='bool'), + annotation=dict(default="", required=False), + pkgsite=dict(default="", required=False), + rootdir=dict(default="", required=False, type='path'), + chroot=dict(default="", required=False, type='path'), + jail=dict(default="", required=False, type='str'), + autoremove=dict(default=False, type='bool')), + supports_check_mode=True, + mutually_exclusive=[["rootdir", "chroot", "jail"]]) + + pkgng_path = module.get_bin_path('pkg', True) + + p = module.params + + pkgs = p["name"] + + changed = False + msgs = [] + dir_arg = "" + + if p["rootdir"] != "": + old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0]) + if old_pkgng: + module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater") + else: + dir_arg = "--rootdir %s" % (p["rootdir"]) + + if p["chroot"] != "": + dir_arg = '--chroot %s' % (p["chroot"]) + + if p["jail"] != "": + dir_arg = '--jail %s' % (p["jail"]) + + if p["state"] in ("present", "latest"): + _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg, p["state"]) + changed = changed or _changed + msgs.append(_msg) + + elif p["state"] == "absent": + _changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg) + changed = changed or _changed + msgs.append(_msg) + + if p["autoremove"]: + _changed, _msg = autoremove_packages(module, pkgng_path, dir_arg) + changed = changed or _changed + msgs.append(_msg) + + if p["annotation"]: + _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg) + changed = changed or _changed + msgs.append(_msg) + + module.exit_json(changed=changed, msg=", ".join(msgs)) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/postgresql_db.py b/test/support/integration/plugins/modules/postgresql_db.py new file mode 100644 index 00000000..40858d99 --- /dev/null +++ b/test/support/integration/plugins/modules/postgresql_db.py @@ -0,0 +1,657 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: postgresql_db +short_description: Add or remove PostgreSQL databases from a remote host. +description: + - Add or remove PostgreSQL databases from a remote host. 
+version_added: '0.6'
+options:
+  name:
+    description:
+      - Name of the database to add or remove
+    type: str
+    required: true
+    aliases: [ db ]
+  port:
+    description:
+      - Database port to connect to (if needed)
+    type: int
+    default: 5432
+    aliases:
+    - login_port
+  owner:
+    description:
+      - Name of the role to set as owner of the database
+    type: str
+  template:
+    description:
+      - Template used to create the database
+    type: str
+  encoding:
+    description:
+      - Encoding of the database
+    type: str
+  lc_collate:
+    description:
+      - Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
+    type: str
+  lc_ctype:
+    description:
+      - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
+        is used as template.
+    type: str
+  session_role:
+    description:
+    - Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
+    - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+    type: str
+    version_added: '2.8'
+  state:
+    description:
+    - The database state.
+    - C(present) implies that the database should be created if necessary.
+    - C(absent) implies that the database should be removed if present.
+    - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
+      Note that some versions of pg_dump (the embedded PostgreSQL utility used by the module)
+      return rc 0 even when errors occur (e.g. when the connection is forbidden by pg_hba.conf),
+      so the module reports changed=True even though the dump has not actually been done. Please make sure that your version of
+      pg_dump returns rc 1 in this case.
+    - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
+    - The format of the backup will be detected based on the target name.
+    - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
+    - Supported formats for dump and restore include C(.sql) and C(.tar)
+    type: str
+    choices: [ absent, dump, present, restore ]
+    default: present
+  target:
+    description:
+    - File to back up or restore from.
+    - Used when I(state) is C(dump) or C(restore).
+    type: path
+    version_added: '2.4'
+  target_opts:
+    description:
+    - Further arguments for pg_dump or pg_restore.
+    - Used when I(state) is C(dump) or C(restore).
+    type: str
+    version_added: '2.4'
+  maintenance_db:
+    description:
+      - The value specifies the initial database (also called the maintenance DB) that Ansible connects to.
+    type: str
+    default: postgres
+    version_added: '2.5'
+  conn_limit:
+    description:
+      - Specifies the database connection limit.
+    type: str
+    version_added: '2.8'
+  tablespace:
+    description:
+      - The tablespace to set for the database
+        U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
+      - If you want to move the database back to the default tablespace,
+        explicitly set this to pg_default.
+    type: path
+    version_added: '2.9'
+  dump_extra_args:
+    description:
+      - Provides additional arguments when I(state) is C(dump).
+      - Cannot be used with dump-file-format-related arguments like ``--format=d``.
+ type: str + version_added: '2.10' +seealso: +- name: CREATE DATABASE reference + description: Complete reference of the CREATE DATABASE command documentation. + link: https://www.postgresql.org/docs/current/sql-createdatabase.html +- name: DROP DATABASE reference + description: Complete reference of the DROP DATABASE command documentation. + link: https://www.postgresql.org/docs/current/sql-dropdatabase.html +- name: pg_dump reference + description: Complete reference of pg_dump documentation. + link: https://www.postgresql.org/docs/current/app-pgdump.html +- name: pg_restore reference + description: Complete reference of pg_restore documentation. + link: https://www.postgresql.org/docs/current/app-pgrestore.html +- module: postgresql_tablespace +- module: postgresql_info +- module: postgresql_ping +notes: +- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8. +author: "Ansible Core Team" +extends_documentation_fragment: +- postgres +''' + +EXAMPLES = r''' +- name: Create a new database with name "acme" + postgresql_db: + name: acme + +# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template. +- name: Create a new database with name "acme" and specific encoding and locale # settings. + postgresql_db: + name: acme + encoding: UTF-8 + lc_collate: de_DE.UTF-8 + lc_ctype: de_DE.UTF-8 + template: template0 + +# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited" +- name: Create a new database with name "acme" which has a limit of 100 concurrent connections + postgresql_db: + name: acme + conn_limit: "100" + +- name: Dump an existing database to a file + postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql + +- name: Dump an existing database to a file excluding the test table + postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql + dump_extra_args: --exclude-table=test + +- name: Dump an existing database to a file (with compression) + postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql.gz + +- name: Dump a single schema for an existing database + postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql + target_opts: "-n public" + +# Note: In the example below, if database foo exists and has another tablespace +# the tablespace will be changed to foo. Access to the database will be locked +# until the copying of database files is finished. +- name: Create a new database called foo in tablespace bar + postgresql_db: + name: foo + tablespace: bar +''' + +RETURN = r''' +executed_commands: + description: List of commands which tried to run. + returned: always + type: list + sample: ["CREATE DATABASE acme"] + version_added: '2.10' +''' + + +import os +import subprocess +import traceback + +try: + import psycopg2 + import psycopg2.extras +except ImportError: + HAS_PSYCOPG2 = False +else: + HAS_PSYCOPG2 = True + +import ansible.module_utils.postgres as pgutils +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.database import SQLParseError, pg_quote_identifier +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_native + +executed_commands = [] + + +class NotSupportedError(Exception): + pass + +# =========================================== +# PostgreSQL module specific support methods. 
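+#
+# Note on quoting: identifiers (database and tablespace names) cannot be
+# passed as psycopg2 bind parameters, so the helpers below interpolate them
+# via pg_quote_identifier() and reserve %(...)s placeholders for plain values
+# such as encoding, locales and the connection limit.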
+# + + +def set_owner(cursor, db, owner): + query = 'ALTER DATABASE %s OWNER TO "%s"' % ( + pg_quote_identifier(db, 'database'), + owner) + executed_commands.append(query) + cursor.execute(query) + return True + + +def set_conn_limit(cursor, db, conn_limit): + query = "ALTER DATABASE %s CONNECTION LIMIT %s" % ( + pg_quote_identifier(db, 'database'), + conn_limit) + executed_commands.append(query) + cursor.execute(query) + return True + + +def get_encoding_id(cursor, encoding): + query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;" + cursor.execute(query, {'encoding': encoding}) + return cursor.fetchone()['encoding_id'] + + +def get_db_info(cursor, db): + query = """ + SELECT rolname AS owner, + pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id, + datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit, + spcname AS tablespace + FROM pg_database + JOIN pg_roles ON pg_roles.oid = pg_database.datdba + JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace + WHERE datname = %(db)s + """ + cursor.execute(query, {'db': db}) + return cursor.fetchone() + + +def db_exists(cursor, db): + query = "SELECT * FROM pg_database WHERE datname=%(db)s" + cursor.execute(query, {'db': db}) + return cursor.rowcount == 1 + + +def db_delete(cursor, db): + if db_exists(cursor, db): + query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database') + executed_commands.append(query) + cursor.execute(query) + return True + else: + return False + + +def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace): + params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace) + if not db_exists(cursor, db): + query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')] + if owner: + query_fragments.append('OWNER "%s"' % owner) + if template: + query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database')) + if encoding: + query_fragments.append('ENCODING %(enc)s') + if lc_collate: + query_fragments.append('LC_COLLATE %(collate)s') + if lc_ctype: + query_fragments.append('LC_CTYPE %(ctype)s') + if tablespace: + query_fragments.append('TABLESPACE %s' % pg_quote_identifier(tablespace, 'tablespace')) + if conn_limit: + query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + query = ' '.join(query_fragments) + executed_commands.append(cursor.mogrify(query, params)) + cursor.execute(query, params) + return True + else: + db_info = get_db_info(cursor, db) + if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): + raise NotSupportedError( + 'Changing database encoding is not supported. ' + 'Current encoding: %s' % db_info['encoding'] + ) + elif lc_collate and lc_collate != db_info['lc_collate']: + raise NotSupportedError( + 'Changing LC_COLLATE is not supported. ' + 'Current LC_COLLATE: %s' % db_info['lc_collate'] + ) + elif lc_ctype and lc_ctype != db_info['lc_ctype']: + raise NotSupportedError( + 'Changing LC_CTYPE is not supported.' 
+ 'Current LC_CTYPE: %s' % db_info['lc_ctype'] + ) + else: + changed = False + + if owner and owner != db_info['owner']: + changed = set_owner(cursor, db, owner) + + if conn_limit and conn_limit != str(db_info['conn_limit']): + changed = set_conn_limit(cursor, db, conn_limit) + + if tablespace and tablespace != db_info['tablespace']: + changed = set_tablespace(cursor, db, tablespace) + + return changed + + +def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace): + if not db_exists(cursor, db): + return False + else: + db_info = get_db_info(cursor, db) + if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): + return False + elif lc_collate and lc_collate != db_info['lc_collate']: + return False + elif lc_ctype and lc_ctype != db_info['lc_ctype']: + return False + elif owner and owner != db_info['owner']: + return False + elif conn_limit and conn_limit != str(db_info['conn_limit']): + return False + elif tablespace and tablespace != db_info['tablespace']: + return False + else: + return True + + +def db_dump(module, target, target_opts="", + db=None, + dump_extra_args=None, + user=None, + password=None, + host=None, + port=None, + **kw): + + flags = login_flags(db, host, port, user, db_prefix=False) + cmd = module.get_bin_path('pg_dump', True) + comp_prog_path = None + + if os.path.splitext(target)[-1] == '.tar': + flags.append(' --format=t') + elif os.path.splitext(target)[-1] == '.pgc': + flags.append(' --format=c') + if os.path.splitext(target)[-1] == '.gz': + if module.get_bin_path('pigz'): + comp_prog_path = module.get_bin_path('pigz', True) + else: + comp_prog_path = module.get_bin_path('gzip', True) + elif os.path.splitext(target)[-1] == '.bz2': + comp_prog_path = module.get_bin_path('bzip2', True) + elif os.path.splitext(target)[-1] == '.xz': + comp_prog_path = module.get_bin_path('xz', True) + + cmd += "".join(flags) + + if dump_extra_args: + cmd += " {0} ".format(dump_extra_args) + + if target_opts: + cmd += " {0} ".format(target_opts) + + if comp_prog_path: + # Use a fifo to be notified of an error in pg_dump + # Using shell pipe has no way to return the code of the first command + # in a portable way. 
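+        # The composed command then looks like (illustrative):
+        #   gzip <pg_fifo >acme.sql.gz & pg_dump ... >pg_fifo
+        # The compressor is backgrounded, so the shell's exit status is the
+        # one from pg_dump itself.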
+        fifo = os.path.join(module.tmpdir, 'pg_fifo')
+        os.mkfifo(fifo)
+        cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
+    else:
+        cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
+
+    return do_with_password(module, cmd, password)
+
+
+def db_restore(module, target, target_opts="",
+               db=None,
+               user=None,
+               password=None,
+               host=None,
+               port=None,
+               **kw):
+
+    flags = login_flags(db, host, port, user)
+    comp_prog_path = None
+    cmd = module.get_bin_path('psql', True)
+
+    if os.path.splitext(target)[-1] == '.sql':
+        flags.append(' --file={0}'.format(target))
+
+    elif os.path.splitext(target)[-1] == '.tar':
+        flags.append(' --format=Tar')
+        cmd = module.get_bin_path('pg_restore', True)
+
+    elif os.path.splitext(target)[-1] == '.pgc':
+        flags.append(' --format=Custom')
+        cmd = module.get_bin_path('pg_restore', True)
+
+    elif os.path.splitext(target)[-1] == '.gz':
+        comp_prog_path = module.get_bin_path('zcat', True)
+
+    elif os.path.splitext(target)[-1] == '.bz2':
+        comp_prog_path = module.get_bin_path('bzcat', True)
+
+    elif os.path.splitext(target)[-1] == '.xz':
+        comp_prog_path = module.get_bin_path('xzcat', True)
+
+    cmd += "".join(flags)
+    if target_opts:
+        cmd += " {0} ".format(target_opts)
+
+    if comp_prog_path:
+        # Keep the inherited environment and add PGPASSWORD to it rather
+        # than replacing it wholesale.
+        env = os.environ.copy()
+        if password:
+            env["PGPASSWORD"] = password
+        p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
+        (stdout2, stderr2) = p2.communicate()
+        p1.stdout.close()
+        p1.wait()
+        if p1.returncode != 0:
+            stderr1 = p1.stderr.read()
+            return p1.returncode, '', stderr1, 'cmd: ****'
+        else:
+            return p2.returncode, '', stderr2, 'cmd: ****'
+    else:
+        cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
+
+    return do_with_password(module, cmd, password)
+
+
+def login_flags(db, host, port, user, db_prefix=True):
+    """
+    returns a list of connection argument strings each prefixed
+    with a space and quoted where necessary to later be combined
+    in a single shell string with `"".join(rv)`
+
+    db_prefix determines if "--dbname" is prefixed to the db argument,
+    since the argument was introduced in 9.3.
+    """
+    flags = []
+    if db:
+        if db_prefix:
+            flags.append(' --dbname={0}'.format(shlex_quote(db)))
+        else:
+            flags.append(' {0}'.format(shlex_quote(db)))
+    if host:
+        flags.append(' --host={0}'.format(host))
+    if port:
+        flags.append(' --port={0}'.format(port))
+    if user:
+        flags.append(' --username={0}'.format(user))
+    return flags
+
+
+def do_with_password(module, cmd, password):
+    env = {}
+    if password:
+        env = {"PGPASSWORD": password}
+    executed_commands.append(cmd)
+    # run_command returns (rc, stdout, stderr); name the values accordingly.
+    rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
+    return rc, stdout, stderr, cmd
+
+
+def set_tablespace(cursor, db, tablespace):
+    query = "ALTER DATABASE %s SET TABLESPACE %s" % (
+        pg_quote_identifier(db, 'database'),
+        pg_quote_identifier(tablespace, 'tablespace'))
+    executed_commands.append(query)
+    cursor.execute(query)
+    return True
+
+# ===========================================
+# Module execution.
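+#
+# main() below dispatches on state: present/absent use the psycopg2 cursor
+# helpers above, while dump/restore shell out through db_dump()/db_restore()
+# (a "raw connection"), which is why they work without psycopg2 installed.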
+# + + +def main(): + argument_spec = pgutils.postgres_common_argument_spec() + argument_spec.update( + db=dict(type='str', required=True, aliases=['name']), + owner=dict(type='str', default=''), + template=dict(type='str', default=''), + encoding=dict(type='str', default=''), + lc_collate=dict(type='str', default=''), + lc_ctype=dict(type='str', default=''), + state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']), + target=dict(type='path', default=''), + target_opts=dict(type='str', default=''), + maintenance_db=dict(type='str', default="postgres"), + session_role=dict(type='str'), + conn_limit=dict(type='str', default=''), + tablespace=dict(type='path', default=''), + dump_extra_args=dict(type='str', default=None), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + db = module.params["db"] + owner = module.params["owner"] + template = module.params["template"] + encoding = module.params["encoding"] + lc_collate = module.params["lc_collate"] + lc_ctype = module.params["lc_ctype"] + target = module.params["target"] + target_opts = module.params["target_opts"] + state = module.params["state"] + changed = False + maintenance_db = module.params['maintenance_db'] + session_role = module.params["session_role"] + conn_limit = module.params['conn_limit'] + tablespace = module.params['tablespace'] + dump_extra_args = module.params['dump_extra_args'] + + raw_connection = state in ("dump", "restore") + + if not raw_connection: + pgutils.ensure_required_libs(module) + + # To use defaults values, keyword arguments must be absent, so + # check which values are empty and don't include in the **kw + # dictionary + params_map = { + "login_host": "host", + "login_user": "user", + "login_password": "password", + "port": "port", + "ssl_mode": "sslmode", + "ca_cert": "sslrootcert" + } + kw = dict((params_map[k], v) for (k, v) in iteritems(module.params) + if k in params_map and v != '' and v is not None) + + # If a login_unix_socket is specified, incorporate it here. + is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + + if is_localhost and module.params["login_unix_socket"] != "": + kw["host"] = module.params["login_unix_socket"] + + if target == "": + target = "{0}/{1}.sql".format(os.getcwd(), db) + target = os.path.expanduser(target) + + if not raw_connection: + try: + db_connection = psycopg2.connect(database=maintenance_db, **kw) + + # Enable autocommit so we can create databases + if psycopg2.__version__ >= '2.4.2': + db_connection.autocommit = True + else: + db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) + + except TypeError as e: + if 'sslrootcert' in e.args[0]: + module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. 
Exception: {0}'.format(to_native(e)), + exception=traceback.format_exc()) + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + if session_role: + try: + cursor.execute('SET ROLE "%s"' % session_role) + except Exception as e: + module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc()) + + try: + if module.check_mode: + if state == "absent": + changed = db_exists(cursor, db) + elif state == "present": + changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace) + module.exit_json(changed=changed, db=db, executed_commands=executed_commands) + + if state == "absent": + try: + changed = db_delete(cursor, db) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + elif state == "present": + try: + changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + elif state in ("dump", "restore"): + method = state == "dump" and db_dump or db_restore + try: + if state == 'dump': + rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw) + else: + rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw) + + if rc != 0: + module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd) + else: + module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd, + executed_commands=executed_commands) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + except NotSupportedError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except SystemExit: + # Avoid catching this on Python 2.4 + raise + except Exception as e: + module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, db=db, executed_commands=executed_commands) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/postgresql_privs.py b/test/support/integration/plugins/modules/postgresql_privs.py new file mode 100644 index 00000000..ba8324dd --- /dev/null +++ b/test/support/integration/plugins/modules/postgresql_privs.py @@ -0,0 +1,1097 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: postgresql_privs +version_added: '1.2' +short_description: Grant or revoke privileges on PostgreSQL database objects +description: +- Grant or revoke privileges on PostgreSQL database objects. +- This module is basically a wrapper around most of the functionality of + PostgreSQL's GRANT and REVOKE statements with detection of changes + (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)). +options: + database: + description: + - Name of database to connect to. 
+    required: yes
+    type: str
+    aliases:
+    - db
+    - login_db
+  state:
+    description:
+    - If C(present), the specified privileges are granted; if C(absent), they are revoked.
+    type: str
+    default: present
+    choices: [ absent, present ]
+  privs:
+    description:
+    - Comma separated list of privileges to grant/revoke.
+    type: str
+    aliases:
+    - priv
+  type:
+    description:
+    - Type of database object to set privileges on.
+    - The C(default_privs) choice is available starting at version 2.7.
+    - The C(foreign_data_wrapper) and C(foreign_server) object types are available from Ansible version '2.8'.
+    - The C(type) choice is available from Ansible version '2.10'.
+    type: str
+    default: table
+    choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function,
+               group, language, table, tablespace, schema, sequence, type ]
+  objs:
+    description:
+    - Comma separated list of database objects to set privileges on.
+    - If I(type) is C(table), C(partition table), C(sequence) or C(function),
+      the special value C(ALL_IN_SCHEMA) can be provided instead to specify all
+      database objects of type I(type) in the schema specified via I(schema).
+      (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available
+       for C(function) and C(partition table) from version 2.8)
+    - If I(type) is C(database), this parameter can be omitted, in which case
+      privileges are set for the database specified via I(database).
+    - 'If I(type) is I(function), colons (":") in object names will be
+      replaced with commas (needed to specify function signatures, see examples)'
+    type: str
+    aliases:
+    - obj
+  schema:
+    description:
+    - Schema that contains the database objects specified via I(objs).
+    - May only be provided if I(type) is C(table), C(sequence), C(function), C(type),
+      or C(default_privs). Defaults to C(public) in these cases.
+    - Note that for built-in types, when I(type=type),
+      I(schema) can be C(pg_catalog) or C(information_schema) respectively.
+    type: str
+  roles:
+    description:
+    - Comma separated list of role (user/group) names to set permissions for.
+    - The special value C(PUBLIC) can be provided instead to set permissions
+      for the implicitly defined PUBLIC group.
+    type: str
+    required: yes
+    aliases:
+    - role
+  fail_on_role:
+    version_added: '2.8'
+    description:
+    - If C(yes), fail when target role (for whom privs need to be granted) does not exist.
+      Otherwise just warn and continue.
+    default: yes
+    type: bool
+  session_role:
+    version_added: '2.8'
+    description:
+    - Switch to session_role after connecting.
+    - The specified session_role must be a role that the current login_user is a member of.
+    - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+    type: str
+  target_roles:
+    description:
+    - A list of existing role (user/group) names to set as the
+      default permissions for database objects subsequently created by them.
+    - Parameter I(target_roles) is only available with C(type=default_privs).
+    type: str
+    version_added: '2.8'
+  grant_option:
+    description:
+    - Whether C(role) may grant/revoke the specified privileges/group memberships to others.
+    - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes.
+    - I(grant_option) only has an effect if I(state) is C(present).
+    type: bool
+    aliases:
+    - admin_option
+  host:
+    description:
+    - Database host address. If unspecified, connect via Unix socket.
+ type: str + aliases: + - login_host + port: + description: + - Database port to connect to. + type: int + default: 5432 + aliases: + - login_port + unix_socket: + description: + - Path to a Unix domain socket for local connections. + type: str + aliases: + - login_unix_socket + login: + description: + - The username to authenticate with. + type: str + default: postgres + aliases: + - login_user + password: + description: + - The password to authenticate with. + type: str + aliases: + - login_password + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + version_added: '2.3' + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. + version_added: '2.3' + type: str + aliases: + - ssl_rootcert + +notes: +- Parameters that accept comma separated lists (I(privs), I(objs), I(roles)) + have singular alias names (I(priv), I(obj), I(role)). +- To revoke only C(GRANT OPTION) for a specific object, set I(state) to + C(present) and I(grant_option) to C(no) (see examples). +- Note that when revoking privileges from a role R, this role may still have + access via privileges granted to any role R is a member of including C(PUBLIC). +- Note that when revoking privileges from a role R, you do so as the user + specified via I(login). If R has been granted the same privileges by + another user also, R can still access database objects via these privileges. +- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs). + +seealso: +- module: postgresql_user +- module: postgresql_owner +- module: postgresql_membership +- name: PostgreSQL privileges + description: General information about PostgreSQL privileges. + link: https://www.postgresql.org/docs/current/ddl-priv.html +- name: PostgreSQL GRANT command reference + description: Complete reference of the PostgreSQL GRANT command documentation. + link: https://www.postgresql.org/docs/current/sql-grant.html +- name: PostgreSQL REVOKE command reference + description: Complete reference of the PostgreSQL REVOKE command documentation. + link: https://www.postgresql.org/docs/current/sql-revoke.html + +extends_documentation_fragment: +- postgres + +author: +- Bernhard Weitzhofer (@b6d) +- Tobias Birkefeld (@tcraxs) +''' + +EXAMPLES = r''' +# On database "library": +# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors +# TO librarian, reader WITH GRANT OPTION +- name: Grant privs to librarian and reader on database library + postgresql_privs: + database: library + state: present + privs: SELECT,INSERT,UPDATE + type: table + objs: books,authors + schema: public + roles: librarian,reader + grant_option: yes + +- name: Same as above leveraging default values + postgresql_privs: + db: library + privs: SELECT,INSERT,UPDATE + objs: books,authors + roles: librarian,reader + grant_option: yes + +# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader +# Note that role "reader" will be *granted* INSERT privilege itself if this +# isn't already the case (since state: present). 
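+# (As a side note, a hypothetical way to verify the result from psql is
+# "\dp books"; an ACL entry such as "reader=a/librarian" would mean INSERT
+# ("a" in aclitem notation) granted to reader by librarian.)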
+- name: Revoke privs from reader
+  postgresql_privs:
+    db: library
+    state: present
+    priv: INSERT
+    obj: books
+    role: reader
+    grant_option: no
+
+# "public" is the default schema. This also works for PostgreSQL 8.x.
+- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
+  postgresql_privs:
+    db: library
+    state: absent
+    privs: INSERT,UPDATE
+    objs: ALL_IN_SCHEMA
+    role: reader
+
+- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
+  postgresql_privs:
+    db: library
+    privs: ALL
+    type: schema
+    objs: public,math
+    role: librarian
+
+# Note the separation of arguments with colons.
+- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
+  postgresql_privs:
+    db: library
+    privs: ALL
+    type: function
+    obj: add(int:int)
+    schema: math
+    roles: librarian,reader
+
+# Note that group role memberships apply cluster-wide and therefore are not
+# restricted to database "library" here.
+- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
+  postgresql_privs:
+    db: library
+    type: group
+    objs: librarian,reader
+    roles: alice,bob
+    admin_option: yes
+
+# Note that here "db: postgres" specifies the database to connect to, not the
+# database to grant privileges on (which is specified via the "objs" param)
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+  postgresql_privs:
+    db: postgres
+    privs: ALL
+    type: database
+    obj: library
+    role: librarian
+
+# If objs is omitted for type "database", it defaults to the database
+# to which the connection is established
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+  postgresql_privs:
+    db: library
+    privs: ALL
+    type: database
+    role: librarian
+
+# Available since version 2.7
+# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific object types and privileges, see the two-step example below
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian
+  postgresql_privs:
+    db: library
+    objs: ALL_DEFAULT
+    privs: ALL
+    type: default_privs
+    role: librarian
+    grant_option: yes
+
+# Available since version 2.7
+# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific object types, set objs and privs explicitly as below
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1
+  postgresql_privs:
+    db: library
+    objs: TABLES,SEQUENCES
+    privs: SELECT
+    type: default_privs
+    role: reader
+
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2
+  postgresql_privs:
+    db: library
+    objs: TYPES
+    privs: USAGE
+    type: default_privs
+    role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
+  postgresql_privs:
+    db: test
+    objs: fdw
+    privs: ALL
+    type: foreign_data_wrapper
+    role: reader
+
+# Available since version 2.10
+- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader
+  postgresql_privs:
+    db: test
+    objs: customtype
+    privs: ALL
+    type: type
+    role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
+  postgresql_privs:
+    db: test
+    objs: fdw_server
+    privs: ALL
+    type: foreign_server
+    role: reader
+
+# Available since version 2.8
+# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
+- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
+  postgresql_privs:
+    type: function
+    state: present
+    privs: EXECUTE
+    roles: caller
+    objs: ALL_IN_SCHEMA
+    schema: common
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
+# Grant SELECT on new TABLES objects created by librarian to the role reader
+# by default.
+- name: ALTER privs
+  postgresql_privs:
+    db: library
+    schema: library
+    objs: TABLES
+    privs: SELECT
+    type: default_privs
+    role: reader
+    target_roles: librarian
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
+# Revoke the SELECT default privilege for new TABLES objects created by
+# librarian from the role reader.
+- name: ALTER privs
+  postgresql_privs:
+    db: library
+    state: absent
+    schema: library
+    objs: TABLES
+    privs: SELECT
+    type: default_privs
+    role: reader
+    target_roles: librarian
+
+# Available since version 2.10
+- name: Grant type privileges for pg_catalog.numeric type to alice
+  postgresql_privs:
+    type: type
+    roles: alice
+    privs: ALL
+    objs: numeric
+    schema: pg_catalog
+    db: acme
+'''
+
+RETURN = r'''
+queries:
+  description: List of executed queries.
+  returned: always
+  type: list
+  sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
+  version_added: '2.8'
+'''
+
+import traceback
+
+PSYCOPG2_IMP_ERR = None
+try:
+    import psycopg2
+    import psycopg2.extensions
+except ImportError:
+    PSYCOPG2_IMP_ERR = traceback.format_exc()
+    psycopg2 = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.database import pg_quote_identifier
+from ansible.module_utils.postgres import postgres_common_argument_spec
+from ansible.module_utils._text import to_native
+
+VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
+                         'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
+                         'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
+VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
+                      'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
+                      'FUNCTIONS': ('ALL', 'EXECUTE'),
+                      'TYPES': ('ALL', 'USAGE')}
+
+executed_queries = []
+
+
+class Error(Exception):
+    pass
+
+
+def role_exists(module, cursor, rolname):
+    """Check whether the given role exists."""
+    query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname
+    try:
+        cursor.execute(query)
+        return cursor.rowcount > 0
+
+    except Exception as e:
+        module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+
+    return False
+
+
+# We don't have functools.partial in Python < 2.5
+def partial(f, *args, **kwargs):
+    """Partial function application"""
+
+    def g(*g_args, **g_kwargs):
+        new_kwargs = kwargs.copy()
+        new_kwargs.update(g_kwargs)
+        return f(*(args + g_args), **new_kwargs)
+
+    g.f = f
+    g.args = args
+    g.kwargs = kwargs
+    return g
+
+
+class Connection(object):
+    """Wrapper around a psycopg2 connection with some convenience methods"""
+
+    def __init__(self, params, module):
+        self.database = params.database
+        self.module = module
+        # To use default values, keyword arguments must be absent, so
+        # check which values are empty and don't include them in the **kw
+        # dictionary
+        params_map = {
+            "host": "host",
+            "login": "user",
+            "password": "password",
+            "port": "port",
+            "database": "database",
+            "ssl_mode": "sslmode",
+            "ca_cert": "sslrootcert"
+        }
+
+        kw = dict((params_map[k], getattr(params, k)) for k in params_map
+                  if getattr(params, k) != '' and getattr(params, k) is not None)
+
+        # If a unix_socket is specified, incorporate it here.
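+        # (Background note: psycopg2/libpq interpret a "host" value that starts
+        # with a slash as the directory containing the Unix socket, e.g.
+        # host='/var/run/postgresql', which is why the socket path can simply
+        # be passed through the "host" keyword below.)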
+        is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+        if is_localhost and params.unix_socket != "":
+            kw["host"] = params.unix_socket
+
+        sslrootcert = params.ca_cert
+        if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
+            raise ValueError('psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+        self.connection = psycopg2.connect(**kw)
+        self.cursor = self.connection.cursor()
+
+    def commit(self):
+        self.connection.commit()
+
+    def rollback(self):
+        self.connection.rollback()
+
+    @property
+    def encoding(self):
+        """Connection encoding in Python-compatible form"""
+        return psycopg2.extensions.encodings[self.connection.encoding]
+
+    # Methods for querying database objects
+
+    # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
+    # phrases in GRANT or REVOKE statements, therefore alternative methods are
+    # provided here.
+
+    def schema_exists(self, schema):
+        query = """SELECT count(*)
+                   FROM pg_catalog.pg_namespace WHERE nspname = %s"""
+        self.cursor.execute(query, (schema,))
+        return self.cursor.fetchone()[0] > 0
+
+    def get_all_tables_in_schema(self, schema):
+        if not self.schema_exists(schema):
+            raise Error('Schema "%s" does not exist.' % schema)
+        query = """SELECT relname
+                   FROM pg_catalog.pg_class c
+                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+                   WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
+        self.cursor.execute(query, (schema,))
+        return [t[0] for t in self.cursor.fetchall()]
+
+    def get_all_sequences_in_schema(self, schema):
+        if not self.schema_exists(schema):
+            raise Error('Schema "%s" does not exist.' % schema)
+        query = """SELECT relname
+                   FROM pg_catalog.pg_class c
+                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+                   WHERE nspname = %s AND relkind = 'S'"""
+        self.cursor.execute(query, (schema,))
+        return [t[0] for t in self.cursor.fetchall()]
+
+    def get_all_functions_in_schema(self, schema):
+        if not self.schema_exists(schema):
+            raise Error('Schema "%s" does not exist.' % schema)
+        query = """SELECT p.proname, oidvectortypes(p.proargtypes)
+                   FROM pg_catalog.pg_proc p
+                   JOIN pg_namespace n ON n.oid = p.pronamespace
+                   WHERE nspname = %s"""
+        self.cursor.execute(query, (schema,))
+        return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+    # Methods for getting access control lists and group membership info
+
+    # To determine whether anything has changed after granting/revoking
+    # privileges, we compare the access control lists of the specified database
+    # objects before and afterwards. Python's list/string comparison should
+    # suffice for change detection; we should not actually have to parse ACLs.
+    # The same should apply to group membership information.
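+    # For illustration (hypothetical ACL values, not taken from this module):
+    # a GRANT that actually changes something turns a snapshot like
+    #   ['librarian=arwdDxt/postgres']
+    # into
+    #   ['librarian=arwdDxt/postgres,reader=r/postgres']
+    # so a plain equality check between the before/after lists detects the
+    # change without parsing the aclitem syntax.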
+ + def get_table_acls(self, schema, tables): + query = """SELECT relacl + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s) + ORDER BY relname""" + self.cursor.execute(query, (schema, tables)) + return [t[0] for t in self.cursor.fetchall()] + + def get_sequence_acls(self, schema, sequences): + query = """SELECT relacl + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s) + ORDER BY relname""" + self.cursor.execute(query, (schema, sequences)) + return [t[0] for t in self.cursor.fetchall()] + + def get_function_acls(self, schema, function_signatures): + funcnames = [f.split('(', 1)[0] for f in function_signatures] + query = """SELECT proacl + FROM pg_catalog.pg_proc p + JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace + WHERE nspname = %s AND proname = ANY (%s) + ORDER BY proname, proargtypes""" + self.cursor.execute(query, (schema, funcnames)) + return [t[0] for t in self.cursor.fetchall()] + + def get_schema_acls(self, schemas): + query = """SELECT nspacl FROM pg_catalog.pg_namespace + WHERE nspname = ANY (%s) ORDER BY nspname""" + self.cursor.execute(query, (schemas,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_language_acls(self, languages): + query = """SELECT lanacl FROM pg_catalog.pg_language + WHERE lanname = ANY (%s) ORDER BY lanname""" + self.cursor.execute(query, (languages,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_tablespace_acls(self, tablespaces): + query = """SELECT spcacl FROM pg_catalog.pg_tablespace + WHERE spcname = ANY (%s) ORDER BY spcname""" + self.cursor.execute(query, (tablespaces,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_database_acls(self, databases): + query = """SELECT datacl FROM pg_catalog.pg_database + WHERE datname = ANY (%s) ORDER BY datname""" + self.cursor.execute(query, (databases,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_group_memberships(self, groups): + query = """SELECT roleid, grantor, member, admin_option + FROM pg_catalog.pg_auth_members am + JOIN pg_catalog.pg_roles r ON r.oid = am.roleid + WHERE r.rolname = ANY(%s) + ORDER BY roleid, grantor, member""" + self.cursor.execute(query, (groups,)) + return self.cursor.fetchall() + + def get_default_privs(self, schema, *args): + query = """SELECT defaclacl + FROM pg_default_acl a + JOIN pg_namespace b ON a.defaclnamespace=b.oid + WHERE b.nspname = %s;""" + self.cursor.execute(query, (schema,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_foreign_data_wrapper_acls(self, fdws): + query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper + WHERE fdwname = ANY (%s) ORDER BY fdwname""" + self.cursor.execute(query, (fdws,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_foreign_server_acls(self, fs): + query = """SELECT srvacl FROM pg_catalog.pg_foreign_server + WHERE srvname = ANY (%s) ORDER BY srvname""" + self.cursor.execute(query, (fs,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_type_acls(self, schema, types): + query = """SELECT t.typacl FROM pg_catalog.pg_type t + JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname""" + self.cursor.execute(query, (schema, types)) + return [t[0] for t in self.cursor.fetchall()] + + # Manipulating privileges + + def manipulate_privs(self, obj_type, privs, 
objs, roles, target_roles, + state, grant_option, schema_qualifier=None, fail_on_role=True): + """Manipulate database object privileges. + + :param obj_type: Type of database object to grant/revoke + privileges for. + :param privs: Either a list of privileges to grant/revoke + or None if type is "group". + :param objs: List of database objects to grant/revoke + privileges for. + :param roles: Either a list of role names or "PUBLIC" + for the implicitly defined "PUBLIC" group + :param target_roles: List of role names to grant/revoke + default privileges as. + :param state: "present" to grant privileges, "absent" to revoke. + :param grant_option: Only for state "present": If True, set + grant/admin option. If False, revoke it. + If None, don't change grant option. + :param schema_qualifier: Some object types ("TABLE", "SEQUENCE", + "FUNCTION") must be qualified by schema. + Ignored for other Types. + """ + # get_status: function to get current status + if obj_type == 'table': + get_status = partial(self.get_table_acls, schema_qualifier) + elif obj_type == 'sequence': + get_status = partial(self.get_sequence_acls, schema_qualifier) + elif obj_type == 'function': + get_status = partial(self.get_function_acls, schema_qualifier) + elif obj_type == 'schema': + get_status = self.get_schema_acls + elif obj_type == 'language': + get_status = self.get_language_acls + elif obj_type == 'tablespace': + get_status = self.get_tablespace_acls + elif obj_type == 'database': + get_status = self.get_database_acls + elif obj_type == 'group': + get_status = self.get_group_memberships + elif obj_type == 'default_privs': + get_status = partial(self.get_default_privs, schema_qualifier) + elif obj_type == 'foreign_data_wrapper': + get_status = self.get_foreign_data_wrapper_acls + elif obj_type == 'foreign_server': + get_status = self.get_foreign_server_acls + elif obj_type == 'type': + get_status = partial(self.get_type_acls, schema_qualifier) + else: + raise Error('Unsupported database object type "%s".' % obj_type) + + # Return False (nothing has changed) if there are no objs to work on. + if not objs: + return False + + # obj_ids: quoted db object identifiers (sometimes schema-qualified) + if obj_type == 'function': + obj_ids = [] + for obj in objs: + try: + f, args = obj.split('(', 1) + except Exception: + raise Error('Illegal function signature: "%s".' 
% obj)
+                obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))
+        elif obj_type in ['table', 'sequence', 'type']:
+            obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs]
+        else:
+            obj_ids = ['"%s"' % o for o in objs]
+
+        # set_what: SQL-fragment specifying what to set for the target roles:
+        # Either group membership or privileges on objects of a certain type
+        if obj_type == 'group':
+            set_what = ','.join('"%s"' % i for i in obj_ids)
+        elif obj_type == 'default_privs':
+            # We don't want privs to be quoted here
+            set_what = ','.join(privs)
+        else:
+            # function types are already quoted above
+            if obj_type != 'function':
+                obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
+            # Note: obj_type has been checked against a set of string literals
+            # and privs was escaped when it was parsed
+            # Note: Underscores are replaced with spaces to support multi-word obj_type
+            set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '),
+                                        ','.join(obj_ids))
+
+        # for_whom: SQL-fragment specifying for whom to set the above
+        if roles == 'PUBLIC':
+            for_whom = 'PUBLIC'
+        else:
+            for_whom = []
+            for r in roles:
+                if not role_exists(self.module, self.cursor, r):
+                    if fail_on_role:
+                        self.module.fail_json(msg="Role '%s' does not exist" % r.strip())
+
+                    else:
+                        self.module.warn("Role '%s' does not exist, skipping it" % r.strip())
+                else:
+                    for_whom.append('"%s"' % r)
+
+            if not for_whom:
+                return False
+
+            for_whom = ','.join(for_whom)
+
+        # as_who:
+        as_who = None
+        if target_roles:
+            as_who = ','.join('"%s"' % r for r in target_roles)
+
+        status_before = get_status(objs)
+
+        query = QueryBuilder(state) \
+            .for_objtype(obj_type) \
+            .with_grant_option(grant_option) \
+            .for_whom(for_whom) \
+            .as_who(as_who) \
+            .for_schema(schema_qualifier) \
+            .set_what(set_what) \
+            .for_objs(objs) \
+            .build()
+
+        executed_queries.append(query)
+        self.cursor.execute(query)
+        status_after = get_status(objs)
+
+        def nonesorted(e):
+            # On Python 3, sorting a list that mixes None with strings raises
+            # TypeError, so treat None as an empty string for comparison purposes.
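+            # (Illustration: sorted(['a', None]) raises "TypeError: '<' not
+            # supported between instances of 'NoneType' and 'str'" on Python 3,
+            # while sorted(['a', None], key=nonesorted) succeeds.)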
+ if e is None: + return '' + return e + + status_before.sort(key=nonesorted) + status_after.sort(key=nonesorted) + return status_before != status_after + + +class QueryBuilder(object): + def __init__(self, state): + self._grant_option = None + self._for_whom = None + self._as_who = None + self._set_what = None + self._obj_type = None + self._state = state + self._schema = None + self._objs = None + self.query = [] + + def for_objs(self, objs): + self._objs = objs + return self + + def for_schema(self, schema): + self._schema = schema + return self + + def with_grant_option(self, option): + self._grant_option = option + return self + + def for_whom(self, who): + self._for_whom = who + return self + + def as_who(self, target_roles): + self._as_who = target_roles + return self + + def set_what(self, what): + self._set_what = what + return self + + def for_objtype(self, objtype): + self._obj_type = objtype + return self + + def build(self): + if self._state == 'present': + self.build_present() + elif self._state == 'absent': + self.build_absent() + else: + self.build_absent() + return '\n'.join(self.query) + + def add_default_revoke(self): + for obj in self._objs: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who, + self._schema, obj, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj, + self._for_whom)) + + def add_grant_option(self): + if self._grant_option: + if self._obj_type == 'group': + self.query[-1] += ' WITH ADMIN OPTION;' + else: + self.query[-1] += ' WITH GRANT OPTION;' + else: + self.query[-1] += ';' + if self._obj_type == 'group': + self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom)) + elif not self._obj_type == 'default_privs': + self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom)) + + def add_default_priv(self): + for obj in self._objs: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(self._as_who, + self._schema, + self._set_what, + obj, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(self._schema, + self._set_what, + obj, + self._for_whom)) + self.add_grant_option() + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who, + self._schema, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom)) + self.add_grant_option() + + def build_present(self): + if self._obj_type == 'default_privs': + self.add_default_revoke() + self.add_default_priv() + else: + self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom)) + self.add_grant_option() + + def build_absent(self): + if self._obj_type == 'default_privs': + self.query = [] + for obj in ['TABLES', 'SEQUENCES', 'TYPES']: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who, + self._schema, obj, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj, + self._for_whom)) + else: + self.query.append('REVOKE {0} FROM 
{1};'.format(self._set_what, self._for_whom)) + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + database=dict(required=True, aliases=['db', 'login_db']), + state=dict(default='present', choices=['present', 'absent']), + privs=dict(required=False, aliases=['priv']), + type=dict(default='table', + choices=['table', + 'sequence', + 'function', + 'database', + 'schema', + 'language', + 'tablespace', + 'group', + 'default_privs', + 'foreign_data_wrapper', + 'foreign_server', + 'type', ]), + objs=dict(required=False, aliases=['obj']), + schema=dict(required=False), + roles=dict(required=True, aliases=['role']), + session_role=dict(required=False), + target_roles=dict(required=False), + grant_option=dict(required=False, type='bool', + aliases=['admin_option']), + host=dict(default='', aliases=['login_host']), + unix_socket=dict(default='', aliases=['login_unix_socket']), + login=dict(default='postgres', aliases=['login_user']), + password=dict(default='', aliases=['login_password'], no_log=True), + fail_on_role=dict(type='bool', default=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + fail_on_role = module.params['fail_on_role'] + + # Create type object as namespace for module params + p = type('Params', (), module.params) + # param "schema": default, allowed depends on param "type" + if p.type in ['table', 'sequence', 'function', 'type', 'default_privs']: + p.schema = p.schema or 'public' + elif p.schema: + module.fail_json(msg='Argument "schema" is not allowed ' + 'for type "%s".' % p.type) + + # param "objs": default, required depends on param "type" + if p.type == 'database': + p.objs = p.objs or p.database + elif not p.objs: + module.fail_json(msg='Argument "objs" is required ' + 'for type "%s".' % p.type) + + # param "privs": allowed, required depends on param "type" + if p.type == 'group': + if p.privs: + module.fail_json(msg='Argument "privs" is not allowed ' + 'for type "group".') + elif not p.privs: + module.fail_json(msg='Argument "privs" is required ' + 'for type "%s".' 
% p.type) + + # Connect to Database + if not psycopg2: + module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR) + try: + conn = Connection(p, module) + except psycopg2.Error as e: + module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc()) + except TypeError as e: + if 'sslrootcert' in e.args[0]: + module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert') + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + except ValueError as e: + # We raise this when the psycopg library is too old + module.fail_json(msg=to_native(e)) + + if p.session_role: + try: + conn.cursor.execute('SET ROLE "%s"' % p.session_role) + except Exception as e: + module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc()) + + try: + # privs + if p.privs: + privs = frozenset(pr.upper() for pr in p.privs.split(',')) + if not privs.issubset(VALID_PRIVS): + module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS)) + else: + privs = None + # objs: + if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_tables_in_schema(p.schema) + elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_sequences_in_schema(p.schema) + elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_functions_in_schema(p.schema) + elif p.type == 'default_privs': + if p.objs == 'ALL_DEFAULT': + objs = frozenset(VALID_DEFAULT_OBJS.keys()) + else: + objs = frozenset(obj.upper() for obj in p.objs.split(',')) + if not objs.issubset(VALID_DEFAULT_OBJS): + module.fail_json( + msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys())) + # Again, do we have valid privs specified for object type: + valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj])) + if not valid_objects_for_priv == objs: + module.fail_json( + msg='Invalid priv specified. Valid object for priv: {0}. 
Objects: {1}'.format(
+                            valid_objects_for_priv, objs))
+        else:
+            objs = p.objs.split(',')
+
+            # function signatures are encoded using ':' to separate args
+            if p.type == 'function':
+                objs = [obj.replace(':', ',') for obj in objs]
+
+        # roles
+        if p.roles == 'PUBLIC':
+            roles = 'PUBLIC'
+        else:
+            roles = p.roles.split(',')
+
+            if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
+                if fail_on_role:
+                    module.fail_json(msg="Role '%s' does not exist" % roles[0].strip())
+
+                else:
+                    module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip())
+                    module.exit_json(changed=False)
+
+        # check if target_roles is set with type: default_privs
+        if p.target_roles and not p.type == 'default_privs':
+            module.warn('"target_roles" will be ignored. '
+                        'Argument "type: default_privs" is required for usage of "target_roles".')
+
+        # target roles
+        if p.target_roles:
+            target_roles = p.target_roles.split(',')
+        else:
+            target_roles = None
+
+        changed = conn.manipulate_privs(
+            obj_type=p.type,
+            privs=privs,
+            objs=objs,
+            roles=roles,
+            target_roles=target_roles,
+            state=p.state,
+            grant_option=p.grant_option,
+            schema_qualifier=p.schema,
+            fail_on_role=fail_on_role,
+        )
+
+    except Error as e:
+        conn.rollback()
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+    except psycopg2.Error as e:
+        conn.rollback()
+        module.fail_json(msg=to_native(e))
+
+    if module.check_mode:
+        conn.rollback()
+    else:
+        conn.commit()
+    module.exit_json(changed=changed, queries=executed_queries)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/postgresql_query.py b/test/support/integration/plugins/modules/postgresql_query.py
new file mode 100644
index 00000000..18d63e33
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_query.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Felix Archambault
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'supported_by': 'community',
+    'status': ['preview']
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_query
+short_description: Run PostgreSQL queries
+description:
+- Runs arbitrary PostgreSQL queries.
+- Can run queries from SQL script files.
+- Does not run against backup files. Use M(postgresql_db) with I(state=restore)
+  to run queries on files made by pg_dump/pg_dumpall utilities.
+version_added: '2.8'
+options:
+  query:
+    description:
+    - SQL query to run. Variables can be escaped with psycopg2 syntax
+      U(http://initd.org/psycopg/docs/usage.html).
+    type: str
+  positional_args:
+    description:
+    - List of values to be passed as positional arguments to the query.
+      When the value is a list, it will be converted to PostgreSQL array.
+    - Mutually exclusive with I(named_args).
+    type: list
+    elements: raw
+  named_args:
+    description:
+    - Dictionary of key-value arguments to pass to the query.
+      When the value is a list, it will be converted to PostgreSQL array.
+    - Mutually exclusive with I(positional_args).
+    type: dict
+  path_to_script:
+    description:
+    - Path to SQL script on the remote host.
+    - Returns result of the last query in the script.
+    - Mutually exclusive with I(query).
+ type: path + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + db: + description: + - Name of database to connect to and run queries against. + type: str + aliases: + - login_db + autocommit: + description: + - Execute in autocommit mode when the query can't be run inside a transaction block + (e.g., VACUUM). + - Mutually exclusive with I(check_mode). + type: bool + default: no + version_added: '2.9' + encoding: + description: + - Set the client encoding for the current session (e.g. C(UTF-8)). + - The default is the encoding defined by the database. + type: str + version_added: '2.10' +seealso: +- module: postgresql_db +author: +- Felix Archambault (@archf) +- Andrew Klychkov (@Andersson007) +- Will Rouesnel (@wrouesnel) +extends_documentation_fragment: postgres +''' + +EXAMPLES = r''' +- name: Simple select query to acme db + postgresql_query: + db: acme + query: SELECT version() + +- name: Select query to db acme with positional arguments and non-default credentials + postgresql_query: + db: acme + login_user: django + login_password: mysecretpass + query: SELECT * FROM acme WHERE id = %s AND story = %s + positional_args: + - 1 + - test + +- name: Select query to test_db with named_args + postgresql_query: + db: test_db + query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s + named_args: + id_val: 1 + story_val: test + +- name: Insert query to test_table in db test_db + postgresql_query: + db: test_db + query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story') + +- name: Run queries from SQL script using UTF-8 client encoding for session + postgresql_query: + db: test_db + path_to_script: /var/lib/pgsql/test.sql + positional_args: + - 1 + encoding: UTF-8 + +- name: Example of using autocommit parameter + postgresql_query: + db: test_db + query: VACUUM + autocommit: yes + +- name: > + Insert data to the column of array type using positional_args. + Note that we use quotes here, the same as for passing JSON, etc. + postgresql_query: + query: INSERT INTO test_table (array_column) VALUES (%s) + positional_args: + - '{1,2,3}' + +# Pass list and string vars as positional_args +- name: Set vars + set_fact: + my_list: + - 1 + - 2 + - 3 + my_arr: '{1, 2, 3}' + +- name: Select from test table by passing positional_args as arrays + postgresql_query: + query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s + positional_args: + - '{{ my_list }}' + - '{{ my_arr|string }}' +''' + +RETURN = r''' +query: + description: Query that was tried to be executed. + returned: always + type: str + sample: 'SELECT * FROM bar' +statusmessage: + description: Attribute containing the message returned by the command. + returned: always + type: str + sample: 'INSERT 0 1' +query_result: + description: + - List of dictionaries in column:value form representing returned rows. + returned: changed + type: list + sample: [{"Column": "Value1"},{"Column": "Value2"}] +rowcount: + description: Number of affected rows. 
+ returned: changed + type: int + sample: 5 +''' + +try: + from psycopg2 import ProgrammingError as Psycopg2ProgrammingError + from psycopg2.extras import DictCursor +except ImportError: + # it is needed for checking 'no result to fetch' in main(), + # psycopg2 availability will be checked by connect_to_db() into + # ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_native +from ansible.module_utils.six import iteritems + + +# =========================================== +# Module execution. +# + +def list_to_pg_array(elem): + """Convert the passed list to PostgreSQL array + represented as a string. + + Args: + elem (list): List that needs to be converted. + + Returns: + elem (str): String representation of PostgreSQL array. + """ + elem = str(elem).strip('[]') + elem = '{' + elem + '}' + return elem + + +def convert_elements_to_pg_arrays(obj): + """Convert list elements of the passed object + to PostgreSQL arrays represented as strings. + + Args: + obj (dict or list): Object whose elements need to be converted. + + Returns: + obj (dict or list): Object with converted elements. + """ + if isinstance(obj, dict): + for (key, elem) in iteritems(obj): + if isinstance(elem, list): + obj[key] = list_to_pg_array(elem) + + elif isinstance(obj, list): + for i, elem in enumerate(obj): + if isinstance(elem, list): + obj[i] = list_to_pg_array(elem) + + return obj + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + query=dict(type='str'), + db=dict(type='str', aliases=['login_db']), + positional_args=dict(type='list', elements='raw'), + named_args=dict(type='dict'), + session_role=dict(type='str'), + path_to_script=dict(type='path'), + autocommit=dict(type='bool', default=False), + encoding=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=(('positional_args', 'named_args'),), + supports_check_mode=True, + ) + + query = module.params["query"] + positional_args = module.params["positional_args"] + named_args = module.params["named_args"] + path_to_script = module.params["path_to_script"] + autocommit = module.params["autocommit"] + encoding = module.params["encoding"] + + if autocommit and module.check_mode: + module.fail_json(msg="Using autocommit is mutually exclusive with check_mode") + + if path_to_script and query: + module.fail_json(msg="path_to_script is mutually exclusive with query") + + if positional_args: + positional_args = convert_elements_to_pg_arrays(positional_args) + + elif named_args: + named_args = convert_elements_to_pg_arrays(named_args) + + if path_to_script: + try: + with open(path_to_script, 'rb') as f: + query = to_native(f.read()) + except Exception as e: + module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e))) + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=autocommit) + if encoding is not None: + db_connection.set_client_encoding(encoding) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Prepare args: + if module.params.get("positional_args"): + arguments = module.params["positional_args"] + elif module.params.get("named_args"): + arguments = module.params["named_args"] + else: + arguments = None + + # Set defaults: + changed = False + + # Execute query: + try: + 
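+        # (Note: psycopg2 performs the parameter binding itself, so values are
+        # escaped safely; e.g., hypothetically,
+        #   query = 'SELECT * FROM test WHERE id = %s', arguments = [1]
+        # or a dict for named placeholders like %(id_val)s when named_args
+        # is used.)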
cursor.execute(query, arguments)
+    except Exception as e:
+        if not autocommit:
+            db_connection.rollback()
+
+        cursor.close()
+        db_connection.close()
+        module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (query, arguments, to_native(e)))
+
+    statusmessage = cursor.statusmessage
+    rowcount = cursor.rowcount
+
+    try:
+        query_result = [dict(row) for row in cursor.fetchall()]
+    except Psycopg2ProgrammingError as e:
+        if to_native(e) == 'no results to fetch':
+            query_result = {}
+        else:
+            module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
+
+    except Exception as e:
+        module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
+
+    if 'SELECT' not in statusmessage:
+        if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
+            s = statusmessage.split()
+            if len(s) == 3:
+                if statusmessage.split()[2] != '0':
+                    changed = True
+
+            elif len(s) == 2:
+                if statusmessage.split()[1] != '0':
+                    changed = True
+
+            else:
+                changed = True
+
+        else:
+            changed = True
+
+    if module.check_mode:
+        db_connection.rollback()
+    else:
+        if not autocommit:
+            db_connection.commit()
+
+    kw = dict(
+        changed=changed,
+        query=cursor.query,
+        statusmessage=statusmessage,
+        query_result=query_result,
+        rowcount=rowcount if rowcount >= 0 else 0,
+    )
+
+    cursor.close()
+    db_connection.close()
+
+    module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/postgresql_set.py b/test/support/integration/plugins/modules/postgresql_set.py
new file mode 100644
index 00000000..cfbdae64
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_set.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_set
+short_description: Change a PostgreSQL server configuration parameter
+description:
+  - Allows changing a PostgreSQL server configuration parameter.
+  - The module uses the ALTER SYSTEM command and applies changes by reloading the server configuration.
+  - ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
+  - It can be more convenient and safe than the traditional method of manually editing the postgresql.conf file.
+  - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
+    which is read in addition to postgresql.conf.
+  - The module allows resetting a parameter to boot_val (cluster initial value) with I(reset=yes), or removing the
+    parameter string from postgresql.auto.conf and reloading with I(value=default)
+    (for settings with postmaster context a restart is required).
+  - After a change you can see the previous and the new parameter value and other
+    information in the ansible output, using the returned values and the M(debug) module.
+version_added: '2.8'
+options:
+  name:
+    description:
+    - Name of PostgreSQL server parameter.
+    type: str
+    required: true
+  value:
+    description:
+    - Parameter value to set.
+    - To remove the parameter string from postgresql.auto.conf and
+      reload the server configuration you must pass I(value=default).
+      With I(value=default) the playbook always returns changed is true.
+    type: str
+  reset:
+    description:
+    - Restore parameter to initial state (boot_val).
+      Mutually exclusive with I(value).
+    type: bool
+    default: false
+  session_role:
+    description:
+    - Switch to session_role after connecting. The specified session_role must
+      be a role that the current login_user is a member of.
+    - Permissions checking for SQL commands is carried out as though
+      the session_role were the one that had logged in originally.
+    type: str
+  db:
+    description:
+    - Name of database to connect.
+    type: str
+    aliases:
+    - login_db
+notes:
+- Supported version of PostgreSQL is 9.4 and later.
+- Note that changing a setting with 'postmaster' context can report changed is true
+  when nothing actually changed, because the same value may be presented in several
+  different forms, for example 1024MB, 1GB, etc., while the pg_settings system view
+  stores it as 131072 (a number of 8kB pages). The final check of the parameter value
+  cannot compare the two because the server has not been restarted and the value in
+  pg_settings is not updated yet.
+- For some parameters a restart of the PostgreSQL server is required.
+  See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+seealso:
+- module: postgresql_info
+- name: PostgreSQL server configuration
+  description: General information about PostgreSQL server configuration.
+  link: https://www.postgresql.org/docs/current/runtime-config.html
+- name: PostgreSQL view pg_settings reference
+  description: Complete reference of the pg_settings view documentation.
+  link: https://www.postgresql.org/docs/current/view-pg-settings.html
+- name: PostgreSQL ALTER SYSTEM command reference
+  description: Complete reference of the ALTER SYSTEM command documentation.
+  link: https://www.postgresql.org/docs/current/sql-altersystem.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment: postgres
+'''

+EXAMPLES = r'''
+- name: Restore wal_keep_segments parameter to initial state
+  postgresql_set:
+    name: wal_keep_segments
+    reset: yes
+
+# Set work_mem parameter to 32MB and show what has changed and whether a restart is required
+# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
+- name: Set work mem parameter
+  postgresql_set:
+    name: work_mem
+    value: 32mb
+  register: set
+
+- debug:
+    msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
+  when: set.changed
+# Note that a restart of the PostgreSQL server is required for some parameters.
+# In this situation you will see the same value in prev_val_pretty and value_pretty, but 'changed=True'
+# (if you passed a value different from the current server setting).
+
+- name: Set log_min_duration_statement parameter to 1 second
+  postgresql_set:
+    name: log_min_duration_statement
+    value: 1s
+
+- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
+  postgresql_set:
+    name: wal_log_hints
+    value: default
+'''
+
+RETURN = r'''
+name:
+  description: Name of PostgreSQL server parameter.
+  returned: always
+  type: str
+  sample: 'shared_buffers'
+restart_required:
+  description: Whether a restart of the PostgreSQL server is required to apply the change.
+  returned: always
+  type: bool
+  sample: true
+prev_val_pretty:
+  description: The parameter value before the change, in human-readable form.
+  returned: always
+  type: str
+  sample: '4MB'
+value_pretty:
+  description: The parameter value after the change, in human-readable form.
+  returned: always
+  type: str
+  sample: '64MB'
+value:
+  description:
+  - Dictionary that contains the current parameter value (at the time of playbook finish).
+  - Note that for the change to actually take effect, some parameters require a restart of the PostgreSQL server.
+  - Returns the current value in the check mode.
+  returned: always
+  type: dict
+  sample: { "value": 67108864, "unit": "b" }
+context:
+  description:
+  - PostgreSQL setting context.
+  returned: always
+  type: str
+  sample: user
+'''
+
+try:
+    from psycopg2.extras import DictCursor
+except Exception:
+    # psycopg2 is checked by connect_to_db()
+    # from ansible.module_utils.postgres
+    pass
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.postgres import (
+    connect_to_db,
+    get_conn_params,
+    postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+PG_REQ_VER = 90400
+
+# Allow setting values like 1mb instead of 1MB, etc.:
+POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def param_get(cursor, module, name):
+    query = ("SELECT name, setting, unit, context, boot_val "
+             "FROM pg_settings WHERE name = %(name)s")
+    try:
+        cursor.execute(query, {'name': name})
+        info = cursor.fetchall()
+        cursor.execute("SHOW %s" % name)
+        val = cursor.fetchone()
+
+    except Exception as e:
+        module.fail_json(msg="Unable to get %s value due to: %s" % (name, to_native(e)))
+
+    raw_val = info[0][1]
+    unit = info[0][2]
+    context = info[0][3]
+    boot_val = info[0][4]
+
+    if val[0] == 'True':
+        val[0] = 'on'
+    elif val[0] == 'False':
+        val[0] = 'off'
+
+    if unit == 'kB':
+        if int(raw_val) > 0:
+            raw_val = int(raw_val) * 1024
+        if int(boot_val) > 0:
+            boot_val = int(boot_val) * 1024
+
+        unit = 'b'
+
+    elif unit == 'MB':
+        if int(raw_val) > 0:
+            raw_val = int(raw_val) * 1024 * 1024
+        if int(boot_val) > 0:
+            boot_val = int(boot_val) * 1024 * 1024
+
+        unit = 'b'
+
+    return (val[0], raw_val, unit, boot_val, context)
+
+
+def pretty_to_bytes(pretty_val):
+    # The function returns a value in bytes
+    # if the value contains 'B', 'kB', 'MB', 'GB', 'TB'.
+    # Otherwise it returns the passed argument.
+
+    val_in_bytes = None
+
+    if 'kB' in pretty_val:
+        num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+        val_in_bytes = num_part * 1024
+
+    elif 'MB' in pretty_val.upper():
+        num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+        val_in_bytes = num_part * 1024 * 1024
+
+    elif 'GB' in pretty_val.upper():
+        num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+        val_in_bytes = num_part * 1024 * 1024 * 1024
+
+    elif 'TB' in pretty_val.upper():
+        num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+        val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
+
+    elif 'B' in pretty_val.upper():
+        num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+        val_in_bytes = num_part
+
+    else:
+        return pretty_val
+
+    return val_in_bytes
+
+
+def param_set(cursor, module, name, value, context):
+    try:
+        if str(value).lower() == 'default':
+            query = "ALTER SYSTEM SET %s = DEFAULT" % name
+        else:
+            query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
+        cursor.execute(query)
+
+        if context != 'postmaster':
+            cursor.execute("SELECT pg_reload_conf()")
+
+    except Exception as e:
+        module.fail_json(msg="Unable to set %s value due to: %s" % (name, to_native(e)))
+
+    return True
+
+
+# ===========================================
+# Module execution.
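+# (Illustrative note: main() below compares the requested and current values
+# via pretty_to_bytes(), so e.g. a requested '4MB' matches a current setting
+# reported as '4096kB': both normalize to 4194304 bytes.)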
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + name=dict(type='str', required=True), + db=dict(type='str', aliases=['login_db']), + value=dict(type='str'), + reset=dict(type='bool'), + session_role=dict(type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + name = module.params["name"] + value = module.params["value"] + reset = module.params["reset"] + + # Allow to pass values like 1mb instead of 1MB, etc: + if value: + for unit in POSSIBLE_SIZE_UNITS: + if value[:-2].isdigit() and unit in value[-2:]: + value = value.upper() + + if value and reset: + module.fail_json(msg="%s: value and reset params are mutually exclusive" % name) + + if not value and not reset: + module.fail_json(msg="%s: at least one of value or reset param must be specified" % name) + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + kw = {} + # Check server version (needs 9.4 or later): + ver = db_connection.server_version + if ver < PG_REQ_VER: + module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER)) + kw = dict( + changed=False, + restart_required=False, + value_pretty="", + prev_val_pretty="", + value={"value": "", "unit": ""}, + ) + kw['name'] = name + db_connection.close() + module.exit_json(**kw) + + # Set default returned values: + restart_required = False + changed = False + kw['name'] = name + kw['restart_required'] = False + + # Get info about param state: + res = param_get(cursor, module, name) + current_value = res[0] + raw_val = res[1] + unit = res[2] + boot_val = res[3] + context = res[4] + + if value == 'True': + value = 'on' + elif value == 'False': + value = 'off' + + kw['prev_val_pretty'] = current_value + kw['value_pretty'] = deepcopy(kw['prev_val_pretty']) + kw['context'] = context + + # Do job + if context == "internal": + module.fail_json(msg="%s: cannot be changed (internal context). 
See "
+                              "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
+
+    if context == "postmaster":
+        restart_required = True
+
+    # If check_mode, just compare and exit:
+    if module.check_mode:
+        if pretty_to_bytes(value) == pretty_to_bytes(current_value):
+            kw['changed'] = False
+
+        else:
+            kw['value_pretty'] = value
+            kw['changed'] = True
+
+        # Always return the current raw value in check_mode:
+        kw['value'] = dict(
+            value=raw_val,
+            unit=unit,
+        )
+        kw['restart_required'] = restart_required
+        module.exit_json(**kw)
+
+    # Set param:
+    if value and value != current_value:
+        changed = param_set(cursor, module, name, value, context)
+
+        kw['value_pretty'] = value
+
+    # Reset param:
+    elif reset:
+        if raw_val == boot_val:
+            # nothing to change, exit:
+            kw['value'] = dict(
+                value=raw_val,
+                unit=unit,
+            )
+            module.exit_json(**kw)
+
+        changed = param_set(cursor, module, name, boot_val, context)
+
+    if restart_required:
+        module.warn("Restart of PostgreSQL is required for setting %s" % name)
+
+    cursor.close()
+    db_connection.close()
+
+    # Reconnect and recheck current value:
+    if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
+        db_connection = connect_to_db(module, conn_params, autocommit=True)
+        cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+        res = param_get(cursor, module, name)
+        # f_ means 'final'
+        f_value = res[0]
+        f_raw_val = res[1]
+
+        if raw_val == f_raw_val:
+            changed = False
+
+        else:
+            changed = True
+
+        kw['value_pretty'] = f_value
+        kw['value'] = dict(
+            value=f_raw_val,
+            unit=unit,
+        )
+
+        cursor.close()
+        db_connection.close()
+
+    kw['changed'] = changed
+    kw['restart_required'] = restart_required
+    module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/postgresql_table.py b/test/support/integration/plugins/modules/postgresql_table.py
new file mode 100644
index 00000000..3bef03b0
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_table.py
@@ -0,0 +1,601 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_table
+short_description: Create, drop, or modify a PostgreSQL table
+description:
+- Allows creating, dropping, renaming, or truncating a table, and changing some table attributes.
+version_added: '2.8'
+options:
+  table:
+    description:
+    - Table name.
+    required: true
+    aliases:
+    - name
+    type: str
+  state:
+    description:
+    - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
+      I(like), I(including), I(columns), I(truncate), I(storage_params) and I(rename).
+    type: str
+    default: present
+    choices: [ absent, present ]
+  tablespace:
+    description:
+    - Set a tablespace for the table.
+    required: false
+    type: str
+  owner:
+    description:
+    - Set a table owner.
+    type: str
+  unlogged:
+    description:
+    - Create an unlogged table.
+    type: bool
+    default: no
+  like:
+    description:
+    - Create a table like another table (with similar DDL).
+      Mutually exclusive with I(columns), I(rename), and I(truncate).
+    type: str
+  including:
+    description:
+    - Keywords that are used with the like parameter; may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL.
+      Requires I(like) to be specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
+    type: str
+  columns:
+    description:
+    - List of column definitions for the new table.
+    type: list
+    elements: str
+  rename:
+    description:
+    - New table name. Mutually exclusive with I(tablespace), I(owner),
+      I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
+    type: str
+  truncate:
+    description:
+    - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
+      I(like), I(including), I(columns), I(rename), and I(storage_params).
+    type: bool
+    default: no
+  storage_params:
+    description:
+    - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
+      Mutually exclusive with I(rename) and I(truncate).
+    type: list
+    elements: str
+  db:
+    description:
+    - Name of the database to connect to, where the table will be created.
+    type: str
+    aliases:
+    - login_db
+  session_role:
+    description:
+    - Switch to session_role after connecting.
+      The specified session_role must be a role that the current login_user is a member of.
+    - Permissions checking for SQL commands is carried out as though
+      the session_role were the one that had logged in originally.
+    type: str
+  cascade:
+    description:
+    - Automatically drop objects that depend on the table (such as views).
+      Used with I(state=absent) only.
+    type: bool
+    default: no
+    version_added: '2.9'
+notes:
+- If you do not pass the db parameter, tables will be created in the database
+  named postgres.
+- PostgreSQL allows creating columnless tables, so the columns param is optional.
+- Unlogged tables are available from PostgreSQL server version 9.1.
+seealso:
+- module: postgresql_sequence
+- module: postgresql_idx
+- module: postgresql_info
+- module: postgresql_tablespace
+- module: postgresql_owner
+- module: postgresql_privs
+- module: postgresql_copy
+- name: CREATE TABLE reference
+  description: Complete reference of the CREATE TABLE command documentation.
+  link: https://www.postgresql.org/docs/current/sql-createtable.html
+- name: ALTER TABLE reference
+  description: Complete reference of the ALTER TABLE command documentation.
+  link: https://www.postgresql.org/docs/current/sql-altertable.html
+- name: DROP TABLE reference
+  description: Complete reference of the DROP TABLE command documentation.
+  link: https://www.postgresql.org/docs/current/sql-droptable.html
+- name: PostgreSQL data types
+  description: Complete reference of the PostgreSQL data types documentation.
+ link: https://www.postgresql.org/docs/current/datatype.html +author: +- Andrei Klychkov (@Andersson007) +extends_documentation_fragment: postgres +''' + +EXAMPLES = r''' +- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner + postgresql_table: + db: acme + name: tbl2 + like: tbl1 + owner: testuser + +- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes + postgresql_table: + db: acme + table: tbl2 + like: tbl1 + including: comments, indexes + tablespace: ssd + +- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1 + postgresql_table: + name: test_table + columns: + - id bigserial primary key + - num bigint + - stories text + tablespace: ssd + storage_params: + - fillfactor=10 + - autovacuum_analyze_threshold=1 + +- name: Create an unlogged table in schema acme + postgresql_table: + name: acme.useless_data + columns: waste_id int + unlogged: true + +- name: Rename table foo to bar + postgresql_table: + table: foo + rename: bar + +- name: Rename table foo from schema acme to bar + postgresql_table: + name: acme.foo + rename: bar + +- name: Set owner to someuser + postgresql_table: + name: foo + owner: someuser + +- name: Change tablespace of foo table to new_tablespace and set owner to new_user + postgresql_table: + name: foo + tablespace: new_tablespace + owner: new_user + +- name: Truncate table foo + postgresql_table: + name: foo + truncate: yes + +- name: Drop table foo from schema acme + postgresql_table: + name: acme.foo + state: absent + +- name: Drop table bar cascade + postgresql_table: + name: bar + state: absent + cascade: yes +''' + +RETURN = r''' +table: + description: Name of a table. + returned: always + type: str + sample: 'foo' +state: + description: Table state. + returned: always + type: str + sample: 'present' +owner: + description: Table owner. + returned: always + type: str + sample: 'postgres' +tablespace: + description: Tablespace. + returned: always + type: str + sample: 'ssd_tablespace' +queries: + description: List of executed queries. + returned: always + type: str + sample: [ 'CREATE TABLE "test_table" (id bigint)' ] +storage_params: + description: Storage parameters. + returned: always + type: list + sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ] +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.database import pg_quote_identifier +from ansible.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +# =========================================== +# PostgreSQL module specific support methods. +# + +class Table(object): + def __init__(self, name, module, cursor): + self.name = name + self.module = module + self.cursor = cursor + self.info = { + 'owner': '', + 'tblspace': '', + 'storage_params': [], + } + self.exists = False + self.__exists_in_db() + self.executed_queries = [] + + def get_info(self): + """Getter to refresh and get table info""" + self.__exists_in_db() + + def __exists_in_db(self): + """Check table exists and refresh info""" + if "." 
in self.name: + schema = self.name.split('.')[-2] + tblname = self.name.split('.')[-1] + else: + schema = 'public' + tblname = self.name + + query = ("SELECT t.tableowner, t.tablespace, c.reloptions " + "FROM pg_tables AS t " + "INNER JOIN pg_class AS c ON c.relname = t.tablename " + "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid " + "WHERE t.tablename = %(tblname)s " + "AND n.nspname = %(schema)s") + res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema}, + add_to_executed=False) + if res: + self.exists = True + self.info = dict( + owner=res[0][0], + tblspace=res[0][1] if res[0][1] else '', + storage_params=res[0][2] if res[0][2] else [], + ) + + return True + else: + self.exists = False + return False + + def create(self, columns='', params='', tblspace='', + unlogged=False, owner=''): + """ + Create table. + If table exists, check passed args (params, tblspace, owner) and, + if they're different from current, change them. + Arguments: + params - storage params (passed by "WITH (...)" in SQL), + comma separated. + tblspace - tablespace. + owner - table owner. + unlogged - create unlogged table. + columns - column string (comma separated). + """ + name = pg_quote_identifier(self.name, 'table') + + changed = False + + if self.exists: + if tblspace == 'pg_default' and self.info['tblspace'] is None: + pass # Because they have the same meaning + elif tblspace and self.info['tblspace'] != tblspace: + self.set_tblspace(tblspace) + changed = True + + if owner and self.info['owner'] != owner: + self.set_owner(owner) + changed = True + + if params: + param_list = [p.strip(' ') for p in params.split(',')] + + new_param = False + for p in param_list: + if p not in self.info['storage_params']: + new_param = True + + if new_param: + self.set_stor_params(params) + changed = True + + if changed: + return True + return False + + query = "CREATE" + if unlogged: + query += " UNLOGGED TABLE %s" % name + else: + query += " TABLE %s" % name + + if columns: + query += " (%s)" % columns + else: + query += " ()" + + if params: + query += " WITH (%s)" % params + + if tblspace: + query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database') + + if exec_sql(self, query, ddl=True): + changed = True + + if owner: + changed = self.set_owner(owner) + + return changed + + def create_like(self, src_table, including='', tblspace='', + unlogged=False, params='', owner=''): + """ + Create table like another table (with similar DDL). + Arguments: + src_table - source table. + including - corresponds to optional INCLUDING expression + in CREATE TABLE ... LIKE statement. + params - storage params (passed by "WITH (...)" in SQL), + comma separated. + tblspace - tablespace. + owner - table owner. + unlogged - create unlogged table. 
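+
+        Illustrative example (added note, not part of the original docstring):
+        with src_table='tbl1' and including='comments, indexes', the generated
+        DDL is roughly
+        CREATE TABLE "new_table" (LIKE "tbl1" INCLUDING comments INCLUDING indexes).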
+ """ + changed = False + + name = pg_quote_identifier(self.name, 'table') + + query = "CREATE" + if unlogged: + query += " UNLOGGED TABLE %s" % name + else: + query += " TABLE %s" % name + + query += " (LIKE %s" % pg_quote_identifier(src_table, 'table') + + if including: + including = including.split(',') + for i in including: + query += " INCLUDING %s" % i + + query += ')' + + if params: + query += " WITH (%s)" % params + + if tblspace: + query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database') + + if exec_sql(self, query, ddl=True): + changed = True + + if owner: + changed = self.set_owner(owner) + + return changed + + def truncate(self): + query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table') + return exec_sql(self, query, ddl=True) + + def rename(self, newname): + query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'), + pg_quote_identifier(newname, 'table')) + return exec_sql(self, query, ddl=True) + + def set_owner(self, username): + query = "ALTER TABLE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'table'), + pg_quote_identifier(username, 'role')) + return exec_sql(self, query, ddl=True) + + def drop(self, cascade=False): + if not self.exists: + return False + + query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table') + if cascade: + query += " CASCADE" + return exec_sql(self, query, ddl=True) + + def set_tblspace(self, tblspace): + query = "ALTER TABLE %s SET TABLESPACE %s" % (pg_quote_identifier(self.name, 'table'), + pg_quote_identifier(tblspace, 'database')) + return exec_sql(self, query, ddl=True) + + def set_stor_params(self, params): + query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params) + return exec_sql(self, query, ddl=True) + + +# =========================================== +# Module execution. 
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + table=dict(type='str', required=True, aliases=['name']), + state=dict(type='str', default="present", choices=["absent", "present"]), + db=dict(type='str', default='', aliases=['login_db']), + tablespace=dict(type='str'), + owner=dict(type='str'), + unlogged=dict(type='bool', default=False), + like=dict(type='str'), + including=dict(type='str'), + rename=dict(type='str'), + truncate=dict(type='bool', default=False), + columns=dict(type='list', elements='str'), + storage_params=dict(type='list', elements='str'), + session_role=dict(type='str'), + cascade=dict(type='bool', default=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + table = module.params["table"] + state = module.params["state"] + tablespace = module.params["tablespace"] + owner = module.params["owner"] + unlogged = module.params["unlogged"] + like = module.params["like"] + including = module.params["including"] + newname = module.params["rename"] + storage_params = module.params["storage_params"] + truncate = module.params["truncate"] + columns = module.params["columns"] + cascade = module.params["cascade"] + + if state == 'present' and cascade: + module.warn("cascade=true is ignored when state=present") + + # Check mutual exclusive parameters: + if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including): + module.fail_json(msg="%s: state=absent is mutually exclusive with: " + "truncate, rename, columns, tablespace, " + "including, like, storage_params, unlogged, owner" % table) + + if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including): + module.fail_json(msg="%s: truncate is mutually exclusive with: " + "rename, columns, like, unlogged, including, " + "storage_params, owner, tablespace" % table) + + if newname and (columns or like or unlogged or storage_params or owner or tablespace or including): + module.fail_json(msg="%s: rename is mutually exclusive with: " + "columns, like, unlogged, including, " + "storage_params, owner, tablespace" % table) + + if like and columns: + module.fail_json(msg="%s: like and columns params are mutually exclusive" % table) + if including and not like: + module.fail_json(msg="%s: including param needs like param specified" % table) + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=False) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + if storage_params: + storage_params = ','.join(storage_params) + + if columns: + columns = ','.join(columns) + + ############## + # Do main job: + table_obj = Table(table, module, cursor) + + # Set default returned values: + changed = False + kw = {} + kw['table'] = table + kw['state'] = '' + if table_obj.exists: + kw = dict( + table=table, + state='present', + owner=table_obj.info['owner'], + tablespace=table_obj.info['tblspace'], + storage_params=table_obj.info['storage_params'], + ) + + if state == 'absent': + changed = table_obj.drop(cascade=cascade) + + elif truncate: + changed = table_obj.truncate() + + elif newname: + changed = table_obj.rename(newname) + q = table_obj.executed_queries + table_obj = Table(newname, module, cursor) + table_obj.executed_queries = q + + elif state == 'present' and not like: + changed = table_obj.create(columns, storage_params, + tablespace, unlogged, owner) + + elif 
state == 'present' and like:
+        changed = table_obj.create_like(like, including, tablespace,
+                                        unlogged, storage_params)
+
+    if changed:
+        if module.check_mode:
+            db_connection.rollback()
+        else:
+            db_connection.commit()
+
+        # Refresh table info for RETURN.
+        # Note, if the table has been renamed, it gets the info by newname:
+        table_obj.get_info()
+        db_connection.commit()
+        if table_obj.exists:
+            kw = dict(
+                table=table,
+                state='present',
+                owner=table_obj.info['owner'],
+                tablespace=table_obj.info['tblspace'],
+                storage_params=table_obj.info['storage_params'],
+            )
+        else:
+            # We just change the table state here
+            # to keep the other information about the dropped table:
+            kw['state'] = 'absent'
+
+    kw['queries'] = table_obj.executed_queries
+    kw['changed'] = changed
+    db_connection.close()
+    module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/postgresql_user.py b/test/support/integration/plugins/modules/postgresql_user.py
new file mode 100644
index 00000000..10afd0a0
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_user.py
@@ -0,0 +1,927 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['stableinterface'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user
+short_description: Add or remove a user (role) from a PostgreSQL server instance
+description:
+- Adds or removes a user (role) from a PostgreSQL server instance
+  ("cluster" in PostgreSQL terminology) and, optionally,
+  grants the user access to an existing database or tables.
+- A user is a role with login privilege.
+- The fundamental function of the module is to create, or delete, users from
+  a PostgreSQL instance. Privilege assignment, or removal, is an optional
+  step, which works on one database at a time. This allows the module to
+  be called several times in the same playbook to modify the permissions on
+  different databases, or to grant permissions to already existing users.
+- A user cannot be removed until all the privileges have been stripped from
+  the user. In such a situation, if the module tries to remove the user it
+  will fail. To prevent this, the fail_on_user option signals the module to
+  try to remove the user, but to keep going if that is not possible; the
+  module will report whether changes happened and, separately, whether the
+  user was removed or not.
+version_added: '0.6'
+options:
+  name:
+    description:
+    - Name of the user (role) to add or remove.
+    type: str
+    required: true
+    aliases:
+    - user
+  password:
+    description:
+    - Set the user's password (before Ansible 1.4 this was required).
+    - Password can be passed unhashed or hashed (MD5-hashed).
+    - An unhashed password will automatically be hashed when saved into the
+      database if the C(encrypted) parameter is set, otherwise it will be saved in
+      plain text format.
+    - When passing a hashed password it must be generated with the format
+      C('str["md5"] + md5[ password + username ]'), resulting in a total of
+      35 characters. An easy way to do this is C(echo "md5$(echo -n
+      'verysecretpasswordJOE' | md5sum | awk '{print $1}')").
+    - Note that if the provided password string is already in MD5-hashed
+      format, then it is used as-is, regardless of the C(encrypted) parameter.
+ type: str + db: + description: + - Name of database to connect to and where user's permissions will be granted. + type: str + aliases: + - login_db + fail_on_user: + description: + - If C(yes), fail when user (role) can't be removed. Otherwise just log and continue. + default: 'yes' + type: bool + aliases: + - fail_on_role + priv: + description: + - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where + privileges can be defined for database ( allowed options - 'CREATE', + 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or + for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE', + 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example + C(table:SELECT) ). Mixed example of this string: + C(CONNECT/CREATE/table1:SELECT/table2:INSERT)." + type: str + role_attr_flags: + description: + - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER." + - Note that '[NO]CREATEUSER' is deprecated. + - To create a simple role for using it like a group, use C(NOLOGIN) flag. + type: str + choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB', + '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ] + session_role: + version_added: '2.8' + description: + - Switch to session_role after connecting. + - The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally. + type: str + state: + description: + - The user (role) state. + type: str + default: present + choices: [ absent, present ] + encrypted: + description: + - Whether the password is stored hashed in the database. + - Passwords can be passed already hashed or unhashed, and postgresql + ensures the stored password is hashed when C(encrypted) is set. + - "Note: Postgresql 10 and newer doesn't support unhashed passwords." + - Previous to Ansible 2.6, this was C(no) by default. + default: 'yes' + type: bool + version_added: '1.4' + expires: + description: + - The date at which the user's password is to expire. + - If set to C('infinity'), user's password never expire. + - Note that this value should be a valid SQL date and time type. + type: str + version_added: '1.4' + no_password_changes: + description: + - If C(yes), don't inspect database for password changes. Effective when + C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make + password changes as necessary. + default: 'no' + type: bool + version_added: '2.0' + conn_limit: + description: + - Specifies the user (role) connection limit. + type: int + version_added: '2.4' + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + version_added: '2.3' + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. + type: str + aliases: [ ssl_rootcert ] + version_added: '2.3' + groups: + description: + - The list of groups (roles) that need to be granted to the user. 
+ type: list + elements: str + version_added: '2.9' + comment: + description: + - Add a comment on the user (equal to the COMMENT ON ROLE statement result). + type: str + version_added: '2.10' +notes: +- The module creates a user (role) with login privilege by default. + Use NOLOGIN role_attr_flags to change this behaviour. +- If you specify PUBLIC as the user (role), then the privilege changes will apply to all users (roles). + You may not specify password or role_attr_flags when the PUBLIC user is specified. +seealso: +- module: postgresql_privs +- module: postgresql_membership +- module: postgresql_owner +- name: PostgreSQL database roles + description: Complete reference of the PostgreSQL database roles documentation. + link: https://www.postgresql.org/docs/current/user-manag.html +author: +- Ansible Core Team +extends_documentation_fragment: postgres +''' + +EXAMPLES = r''' +- name: Connect to acme database, create django user, and grant access to database and products table + postgresql_user: + db: acme + name: django + password: ceec4eif7ya + priv: "CONNECT/products:ALL" + expires: "Jan 31 2020" + +- name: Add a comment on django user + postgresql_user: + db: acme + name: django + comment: This is a test user + +# Connect to default database, create rails user, set its password (MD5-hashed), +# and grant privilege to create other databases and demote rails from super user status if user exists +- name: Create rails user, set MD5-hashed password, grant privs + postgresql_user: + name: rails + password: md59543f1d82624df2b31672ec0f7050460 + role_attr_flags: CREATEDB,NOSUPERUSER + +- name: Connect to acme database and remove test user privileges from there + postgresql_user: + db: acme + name: test + priv: "ALL/products:ALL" + state: absent + fail_on_user: no + +- name: Connect to test database, remove test user from cluster + postgresql_user: + db: test + name: test + priv: ALL + state: absent + +- name: Connect to acme database and set user's password with no expire date + postgresql_user: + db: acme + name: django + password: mysupersecretword + priv: "CONNECT/products:ALL" + expires: infinity + +# Example privileges string format +# INSERT,UPDATE/table:SELECT/anothertable:ALL + +- name: Connect to test database and remove an existing user's password + postgresql_user: + db: test + user: test + password: "" + +- name: Create user test and grant group user_ro and user_rw to it + postgresql_user: + name: test + groups: + - user_ro + - user_rw +''' + +RETURN = r''' +queries: + description: List of executed queries. 
+ returned: always + type: list + sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"'] + version_added: '2.8' +''' + +import itertools +import re +import traceback +from hashlib import md5 + +try: + import psycopg2 + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.database import pg_quote_identifier, SQLParseError +from ansible.module_utils.postgres import ( + connect_to_db, + get_conn_params, + PgMembership, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.six import iteritems + + +FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION') +FLAGS_BY_VERSION = {'BYPASSRLS': 90500} + +VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')), + database=frozenset( + ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')), + ) + +# map to cope with idiosyncracies of SUPERUSER and LOGIN +PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole', + CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin', + REPLICATION='rolreplication', BYPASSRLS='rolbypassrls') + +executed_queries = [] + + +class InvalidFlagsError(Exception): + pass + + +class InvalidPrivsError(Exception): + pass + +# =========================================== +# PostgreSQL module specific support methods. +# + + +def user_exists(cursor, user): + # The PUBLIC user is a special case that is always there + if user == 'PUBLIC': + return True + query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s" + cursor.execute(query, {'user': user}) + return cursor.rowcount > 0 + + +def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit): + """Create a new database user (role).""" + # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a + # literal + query_password_data = dict(password=password, expires=expires) + query = ['CREATE USER "%(user)s"' % + {"user": user}] + if password is not None and password != '': + query.append("WITH %(crypt)s" % {"crypt": encrypted}) + query.append("PASSWORD %(password)s") + if expires is not None: + query.append("VALID UNTIL %(expires)s") + if conn_limit is not None: + query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + query.append(role_attr_flags) + query = ' '.join(query) + executed_queries.append(query) + cursor.execute(query, query_password_data) + return True + + +def user_should_we_change_password(current_role_attrs, user, password, encrypted): + """Check if we should change the user's password. + + Compare the proposed password with the existing one, comparing + hashes if encrypted. If we can't access it assume yes. + """ + + if current_role_attrs is None: + # on some databases, E.g. AWS RDS instances, there is no access to + # the pg_authid relation to check the pre-existing password, so we + # just assume password is different + return True + + # Do we actually need to do anything? + pwchanging = False + if password is not None: + # Empty password means that the role shouldn't have a password, which + # means we need to check if the current password is None. 
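+        # Added note (not part of the original module): the MD5 format tested
+        # in the elif branch below is 'md5' + md5(password + username), i.e.
+        # for user 'joe' with password 'secret':
+        #
+        #   from hashlib import md5
+        #   'md5' + md5(b'secret' + b'joe').hexdigest()   # 35 characters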
+ if password == '': + if current_role_attrs['rolpassword'] is not None: + pwchanging = True + # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits + # 3: The size of the 'md5' prefix + # When the provided password looks like a MD5-hash, value of + # 'encrypted' is ignored. + elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED': + if password != current_role_attrs['rolpassword']: + pwchanging = True + elif encrypted == 'ENCRYPTED': + hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest()) + if hashed_password != current_role_attrs['rolpassword']: + pwchanging = True + + return pwchanging + + +def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit): + """Change user password and/or attributes. Return True if changed, False otherwise.""" + changed = False + + cursor = db_connection.cursor(cursor_factory=DictCursor) + # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a + # literal + if user == 'PUBLIC': + if password is not None: + module.fail_json(msg="cannot change the password for PUBLIC user") + elif role_attr_flags != '': + module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user") + else: + return False + + # Handle passwords. + if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None): + # Select password and all flag-like columns in order to verify changes. + try: + select = "SELECT * FROM pg_authid where rolname=%(user)s" + cursor.execute(select, {"user": user}) + # Grab current role attributes. + current_role_attrs = cursor.fetchone() + except psycopg2.ProgrammingError: + current_role_attrs = None + db_connection.rollback() + + pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted) + + if current_role_attrs is None: + try: + # AWS RDS instances does not allow user to access pg_authid + # so try to get current_role_attrs from pg_roles tables + select = "SELECT * FROM pg_roles where rolname=%(user)s" + cursor.execute(select, {"user": user}) + # Grab current role attributes from pg_roles + current_role_attrs = cursor.fetchone() + except psycopg2.ProgrammingError as e: + db_connection.rollback() + module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e)) + + role_attr_flags_changing = False + if role_attr_flags: + role_attr_flags_dict = {} + for r in role_attr_flags.split(' '): + if r.startswith('NO'): + role_attr_flags_dict[r.replace('NO', '', 1)] = False + else: + role_attr_flags_dict[r] = True + + for role_attr_name, role_attr_value in role_attr_flags_dict.items(): + if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value: + role_attr_flags_changing = True + + if expires is not None: + cursor.execute("SELECT %s::timestamptz;", (expires,)) + expires_with_tz = cursor.fetchone()[0] + expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil') + else: + expires_changing = False + + conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit']) + + if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing: + return False + + alter = ['ALTER USER "%(user)s"' % {"user": user}] + if pwchanging: + if password != '': + alter.append("WITH %(crypt)s" % {"crypt": encrypted}) + alter.append("PASSWORD %(password)s") + else: + alter.append("WITH PASSWORD 
NULL") + alter.append(role_attr_flags) + elif role_attr_flags: + alter.append('WITH %s' % role_attr_flags) + if expires is not None: + alter.append("VALID UNTIL %(expires)s") + if conn_limit is not None: + alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + + query_password_data = dict(password=password, expires=expires) + try: + cursor.execute(' '.join(alter), query_password_data) + changed = True + except psycopg2.InternalError as e: + if e.pgcode == '25006': + # Handle errors due to read-only transactions indicated by pgcode 25006 + # ERROR: cannot execute ALTER ROLE in a read-only transaction + changed = False + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + return changed + else: + raise psycopg2.InternalError(e) + except psycopg2.NotSupportedError as e: + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + + elif no_password_changes and role_attr_flags != '': + # Grab role information from pg_roles instead of pg_authid + select = "SELECT * FROM pg_roles where rolname=%(user)s" + cursor.execute(select, {"user": user}) + # Grab current role attributes. + current_role_attrs = cursor.fetchone() + + role_attr_flags_changing = False + + if role_attr_flags: + role_attr_flags_dict = {} + for r in role_attr_flags.split(' '): + if r.startswith('NO'): + role_attr_flags_dict[r.replace('NO', '', 1)] = False + else: + role_attr_flags_dict[r] = True + + for role_attr_name, role_attr_value in role_attr_flags_dict.items(): + if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value: + role_attr_flags_changing = True + + if not role_attr_flags_changing: + return False + + alter = ['ALTER USER "%(user)s"' % + {"user": user}] + if role_attr_flags: + alter.append('WITH %s' % role_attr_flags) + + try: + cursor.execute(' '.join(alter)) + except psycopg2.InternalError as e: + if e.pgcode == '25006': + # Handle errors due to read-only transactions indicated by pgcode 25006 + # ERROR: cannot execute ALTER ROLE in a read-only transaction + changed = False + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + return changed + else: + raise psycopg2.InternalError(e) + + # Grab new role attributes. + cursor.execute(select, {"user": user}) + new_role_attrs = cursor.fetchone() + + # Detect any differences between current_ and new_role_attrs. + changed = current_role_attrs != new_role_attrs + + return changed + + +def user_delete(cursor, user): + """Try to remove a user. Returns True if successful otherwise False""" + cursor.execute("SAVEPOINT ansible_pgsql_user_delete") + try: + query = 'DROP USER "%s"' % user + executed_queries.append(query) + cursor.execute(query) + except Exception: + cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete") + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") + return False + + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") + return True + + +def has_table_privileges(cursor, user, table, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. 
+ + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_table_privileges(cursor, user, table) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) + + +def get_table_privileges(cursor, user, table): + if '.' in table: + schema, table = table.split('.', 1) + else: + schema = 'public' + query = ("SELECT privilege_type FROM information_schema.role_table_grants " + "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s") + cursor.execute(query, {'user': user, 'table': table, 'schema': schema}) + return frozenset([x[0] for x in cursor.fetchall()]) + + +def grant_table_privileges(cursor, user, table, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + query = 'GRANT %s ON TABLE %s TO "%s"' % ( + privs, pg_quote_identifier(table, 'table'), user) + executed_queries.append(query) + cursor.execute(query) + + +def revoke_table_privileges(cursor, user, table, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + query = 'REVOKE %s ON TABLE %s FROM "%s"' % ( + privs, pg_quote_identifier(table, 'table'), user) + executed_queries.append(query) + cursor.execute(query) + + +def get_database_privileges(cursor, user, db): + priv_map = { + 'C': 'CREATE', + 'T': 'TEMPORARY', + 'c': 'CONNECT', + } + query = 'SELECT datacl FROM pg_database WHERE datname = %s' + cursor.execute(query, (db,)) + datacl = cursor.fetchone()[0] + if datacl is None: + return set() + r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl) + if r is None: + return set() + o = set() + for v in r.group(1): + o.add(priv_map[v]) + return normalize_privileges(o, 'database') + + +def has_database_privileges(cursor, user, db, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. 
+ + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_database_privileges(cursor, user, db) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) + + +def grant_database_privileges(cursor, user, db, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + if user == "PUBLIC": + query = 'GRANT %s ON DATABASE %s TO PUBLIC' % ( + privs, pg_quote_identifier(db, 'database')) + else: + query = 'GRANT %s ON DATABASE %s TO "%s"' % ( + privs, pg_quote_identifier(db, 'database'), user) + + executed_queries.append(query) + cursor.execute(query) + + +def revoke_database_privileges(cursor, user, db, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + if user == "PUBLIC": + query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % ( + privs, pg_quote_identifier(db, 'database')) + else: + query = 'REVOKE %s ON DATABASE %s FROM "%s"' % ( + privs, pg_quote_identifier(db, 'database'), user) + + executed_queries.append(query) + cursor.execute(query) + + +def revoke_privileges(cursor, user, privs): + if privs is None: + return False + + revoke_funcs = dict(table=revoke_table_privileges, + database=revoke_database_privileges) + check_funcs = dict(table=has_table_privileges, + database=has_database_privileges) + + changed = False + for type_ in privs: + for name, privileges in iteritems(privs[type_]): + # Check that any of the privileges requested to be removed are + # currently granted to the user + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[0]: + revoke_funcs[type_](cursor, user, name, privileges) + changed = True + return changed + + +def grant_privileges(cursor, user, privs): + if privs is None: + return False + + grant_funcs = dict(table=grant_table_privileges, + database=grant_database_privileges) + check_funcs = dict(table=has_table_privileges, + database=has_database_privileges) + + changed = False + for type_ in privs: + for name, privileges in iteritems(privs[type_]): + # Check that any of the privileges requested for the user are + # currently missing + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[2]: + grant_funcs[type_](cursor, user, name, privileges) + changed = True + return changed + + +def parse_role_attrs(cursor, role_attr_flags): + """ + Parse role attributes string for user creation. + Format: + + attributes[,attributes,...] + + Where: + + attributes := CREATEDB,CREATEROLE,NOSUPERUSER,... + [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB", + "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION", + "[NO]BYPASSRLS" ] + + Note: "[NO]BYPASSRLS" role attribute introduced in 9.5 + Note: "[NO]CREATEUSER" role attribute is deprecated. 
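+
+    Illustrative example (added note, not part of the original docstring):
+    role_attr_flags='createdb,nologin' is upper-cased, validated against the
+    flags supported by the server and returned as a space-separated string
+    such as 'CREATEDB NOLOGIN' (set ordering is not guaranteed).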
+ + """ + flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role) + + valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor))) + valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags))) + + if not flags.issubset(valid_flags): + raise InvalidFlagsError('Invalid role_attr_flags specified: %s' % + ' '.join(flags.difference(valid_flags))) + + return ' '.join(flags) + + +def normalize_privileges(privs, type_): + new_privs = set(privs) + if 'ALL' in new_privs: + new_privs.update(VALID_PRIVS[type_]) + new_privs.remove('ALL') + if 'TEMP' in new_privs: + new_privs.add('TEMPORARY') + new_privs.remove('TEMP') + + return new_privs + + +def parse_privs(privs, db): + """ + Parse privilege string to determine permissions for database db. + Format: + + privileges[/privileges/...] + + Where: + + privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] | + TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...] + """ + if privs is None: + return privs + + o_privs = { + 'database': {}, + 'table': {} + } + for token in privs.split('/'): + if ':' not in token: + type_ = 'database' + name = db + priv_set = frozenset(x.strip().upper() + for x in token.split(',') if x.strip()) + else: + type_ = 'table' + name, privileges = token.split(':', 1) + priv_set = frozenset(x.strip().upper() + for x in privileges.split(',') if x.strip()) + + if not priv_set.issubset(VALID_PRIVS[type_]): + raise InvalidPrivsError('Invalid privs specified for %s: %s' % + (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_])))) + + priv_set = normalize_privileges(priv_set, type_) + o_privs[type_][name] = priv_set + + return o_privs + + +def get_valid_flags_by_version(cursor): + """ + Some role attributes were introduced after certain versions. We want to + compile a list of valid flags against the current Postgres version. + """ + current_version = cursor.connection.server_version + + return [ + flag + for flag, version_introduced in FLAGS_BY_VERSION.items() + if current_version >= version_introduced + ] + + +def get_comment(cursor, user): + """Get user's comment.""" + query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') " + "FROM pg_catalog.pg_roles r " + "WHERE r.rolname = %(user)s") + cursor.execute(query, {'user': user}) + return cursor.fetchone()[0] + + +def add_comment(cursor, user, comment): + """Add comment on user.""" + if comment != get_comment(cursor, user): + query = 'COMMENT ON ROLE "%s" IS ' % user + cursor.execute(query + '%(comment)s', {'comment': comment}) + executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment})) + return True + else: + return False + + +# =========================================== +# Module execution. 
+# + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + user=dict(type='str', required=True, aliases=['name']), + password=dict(type='str', default=None, no_log=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + priv=dict(type='str', default=None), + db=dict(type='str', default='', aliases=['login_db']), + fail_on_user=dict(type='bool', default='yes', aliases=['fail_on_role']), + role_attr_flags=dict(type='str', default=''), + encrypted=dict(type='bool', default='yes'), + no_password_changes=dict(type='bool', default='no'), + expires=dict(type='str', default=None), + conn_limit=dict(type='int', default=None), + session_role=dict(type='str'), + groups=dict(type='list', elements='str'), + comment=dict(type='str', default=None), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + user = module.params["user"] + password = module.params["password"] + state = module.params["state"] + fail_on_user = module.params["fail_on_user"] + if module.params['db'] == '' and module.params["priv"] is not None: + module.fail_json(msg="privileges require a database to be specified") + privs = parse_privs(module.params["priv"], module.params["db"]) + no_password_changes = module.params["no_password_changes"] + if module.params["encrypted"]: + encrypted = "ENCRYPTED" + else: + encrypted = "UNENCRYPTED" + expires = module.params["expires"] + conn_limit = module.params["conn_limit"] + role_attr_flags = module.params["role_attr_flags"] + groups = module.params["groups"] + if groups: + groups = [e.strip() for e in groups] + comment = module.params["comment"] + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + try: + role_attr_flags = parse_role_attrs(cursor, role_attr_flags) + except InvalidFlagsError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + kw = dict(user=user) + changed = False + user_removed = False + + if state == "present": + if user_exists(cursor, user): + try: + changed = user_alter(db_connection, module, user, password, + role_attr_flags, encrypted, expires, no_password_changes, conn_limit) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + else: + try: + changed = user_add(cursor, user, password, + role_attr_flags, encrypted, expires, conn_limit) + except psycopg2.ProgrammingError as e: + module.fail_json(msg="Unable to add user with given requirement " + "due to : %s" % to_native(e), + exception=traceback.format_exc()) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + try: + changed = grant_privileges(cursor, user, privs) or changed + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + if groups: + target_roles = [] + target_roles.append(user) + pg_membership = PgMembership(module, cursor, groups, target_roles) + changed = pg_membership.grant() or changed + executed_queries.extend(pg_membership.executed_queries) + + if comment is not None: + try: + changed = add_comment(cursor, user, comment) or changed + except Exception as e: + module.fail_json(msg='Unable to add comment on role: %s' % to_native(e), + exception=traceback.format_exc()) + + else: + if user_exists(cursor, user): + if module.check_mode: + changed = True + kw['user_removed'] = True + else: + try: + 
changed = revoke_privileges(cursor, user, privs) + user_removed = user_delete(cursor, user) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + changed = changed or user_removed + if fail_on_user and not user_removed: + msg = "Unable to remove user" + module.fail_json(msg=msg) + kw['user_removed'] = user_removed + + if changed: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + kw['changed'] = changed + kw['queries'] = executed_queries + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/rabbitmq_plugin.py b/test/support/integration/plugins/modules/rabbitmq_plugin.py new file mode 100644 index 00000000..301bbfe2 --- /dev/null +++ b/test/support/integration/plugins/modules/rabbitmq_plugin.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Chatham Financial <oss@chathamfinancial.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: rabbitmq_plugin +short_description: Manage RabbitMQ plugins +description: + - This module can be used to enable or disable RabbitMQ plugins. +version_added: "1.1" +author: + - Chris Hoffman (@chrishoffman) +options: + names: + description: + - Comma-separated list of plugin names. Also, accepts plugin name. + required: true + aliases: [name] + new_only: + description: + - Only enable missing plugins. + - Does not disable plugins that are not in the names list. + type: bool + default: "no" + state: + description: + - Specify if plugins are to be enabled or disabled. + default: enabled + choices: [enabled, disabled] + prefix: + description: + - Specify a custom install prefix to a Rabbit. + version_added: "1.3" +''' + +EXAMPLES = ''' +- name: Enables the rabbitmq_management plugin + rabbitmq_plugin: + names: rabbitmq_management + state: enabled + +- name: Enable multiple rabbitmq plugins + rabbitmq_plugin: + names: rabbitmq_management,rabbitmq_management_visualiser + state: enabled + +- name: Disable plugin + rabbitmq_plugin: + names: rabbitmq_management + state: disabled + +- name: Enable every plugin in list with existing plugins + rabbitmq_plugin: + names: rabbitmq_management,rabbitmq_management_visualiser,rabbitmq_shovel,rabbitmq_shovel_management + state: enabled + new_only: 'yes' +''' + +RETURN = ''' +enabled: + description: list of plugins enabled during task run + returned: always + type: list + sample: ["rabbitmq_management"] +disabled: + description: list of plugins disabled during task run + returned: always + type: list + sample: ["rabbitmq_management"] +''' + +import os +from ansible.module_utils.basic import AnsibleModule + + +class RabbitMqPlugins(object): + + def __init__(self, module): + self.module = module + bin_path = '' + if module.params['prefix']: + if os.path.isdir(os.path.join(module.params['prefix'], 'bin')): + bin_path = os.path.join(module.params['prefix'], 'bin') + elif os.path.isdir(os.path.join(module.params['prefix'], 'sbin')): + bin_path = os.path.join(module.params['prefix'], 'sbin') + else: + # No such path exists. 
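+                # Added note (not part of the original module): e.g. with
+                # prefix=/opt/rabbitmq, either /opt/rabbitmq/bin or
+                # /opt/rabbitmq/sbin must contain the rabbitmq-plugins tool.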
+ module.fail_json(msg="No binary folder in prefix %s" % module.params['prefix']) + + self._rabbitmq_plugins = os.path.join(bin_path, "rabbitmq-plugins") + else: + self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) + + def _exec(self, args, run_in_check_mode=False): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [self._rabbitmq_plugins] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return out.splitlines() + return list() + + def get_all(self): + list_output = self._exec(['list', '-E', '-m'], True) + plugins = [] + for plugin in list_output: + if not plugin: + break + plugins.append(plugin) + + return plugins + + def enable(self, name): + self._exec(['enable', name]) + + def disable(self, name): + self._exec(['disable', name]) + + +def main(): + arg_spec = dict( + names=dict(required=True, aliases=['name']), + new_only=dict(default='no', type='bool'), + state=dict(default='enabled', choices=['enabled', 'disabled']), + prefix=dict(required=False, default=None) + ) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + result = dict() + names = module.params['names'].split(',') + new_only = module.params['new_only'] + state = module.params['state'] + + rabbitmq_plugins = RabbitMqPlugins(module) + enabled_plugins = rabbitmq_plugins.get_all() + + enabled = [] + disabled = [] + if state == 'enabled': + if not new_only: + for plugin in enabled_plugins: + if " " in plugin: + continue + if plugin not in names: + rabbitmq_plugins.disable(plugin) + disabled.append(plugin) + + for name in names: + if name not in enabled_plugins: + rabbitmq_plugins.enable(name) + enabled.append(name) + else: + for plugin in enabled_plugins: + if plugin in names: + rabbitmq_plugins.disable(plugin) + disabled.append(plugin) + + result['changed'] = len(enabled) > 0 or len(disabled) > 0 + result['enabled'] = enabled + result['disabled'] = disabled + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/rabbitmq_queue.py b/test/support/integration/plugins/modules/rabbitmq_queue.py new file mode 100644 index 00000000..567ec813 --- /dev/null +++ b/test/support/integration/plugins/modules/rabbitmq_queue.py @@ -0,0 +1,257 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Manuel Sousa <manuel.sousa@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rabbitmq_queue +author: Manuel Sousa (@manuel-sousa) +version_added: "2.0" + +short_description: Manage rabbitMQ queues +description: + - This module uses rabbitMQ Rest API to create/delete queues +requirements: [ "requests >= 1.0.0" ] +options: + name: + description: + - Name of the queue + required: true + state: + description: + - Whether the queue should be present or absent + choices: [ "present", "absent" ] + default: present + durable: + description: + - whether queue is durable or not + type: bool + default: 'yes' + auto_delete: + description: + - if the queue should delete itself after all queues/queues unbound from it + type: bool + default: 'no' + message_ttl: + description: + - How long a message can live in queue before it is discarded (milliseconds) + default: forever + 
auto_expires: + description: + - How long a queue can be unused before it is automatically deleted (milliseconds) + default: forever + max_length: + description: + - How many messages can the queue contain before it starts rejecting + default: no limit + dead_letter_exchange: + description: + - Optional name of an exchange to which messages will be republished if they + - are rejected or expire + dead_letter_routing_key: + description: + - Optional replacement routing key to use when a message is dead-lettered. + - Original routing key will be used if unset + max_priority: + description: + - Maximum number of priority levels for the queue to support. + - If not set, the queue will not support message priorities. + - Larger numbers indicate higher priority. + version_added: "2.4" + arguments: + description: + - extra arguments for queue. If defined this argument is a key/value dictionary + default: {} +extends_documentation_fragment: + - rabbitmq +''' + +EXAMPLES = ''' +# Create a queue +- rabbitmq_queue: + name: myQueue + +# Create a queue on remote host +- rabbitmq_queue: + name: myRemoteQueue + login_user: user + login_password: secret + login_host: remote.example.org +''' + +import json +import traceback + +REQUESTS_IMP_ERR = None +try: + import requests + HAS_REQUESTS = True +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + HAS_REQUESTS = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six.moves.urllib import parse as urllib_parse +from ansible.module_utils.rabbitmq import rabbitmq_argument_spec + + +def main(): + + argument_spec = rabbitmq_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + durable=dict(default=True, type='bool'), + auto_delete=dict(default=False, type='bool'), + message_ttl=dict(default=None, type='int'), + auto_expires=dict(default=None, type='int'), + max_length=dict(default=None, type='int'), + dead_letter_exchange=dict(default=None, type='str'), + dead_letter_routing_key=dict(default=None, type='str'), + arguments=dict(default=dict(), type='dict'), + max_priority=dict(default=None, type='int') + ) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + url = "%s://%s:%s/api/queues/%s/%s" % ( + module.params['login_protocol'], + module.params['login_host'], + module.params['login_port'], + urllib_parse.quote(module.params['vhost'], ''), + module.params['name'] + ) + + if not HAS_REQUESTS: + module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR) + + result = dict(changed=False, name=module.params['name']) + + # Check if queue already exists + r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']), + verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key'])) + + if r.status_code == 200: + queue_exists = True + response = r.json() + elif r.status_code == 404: + queue_exists = False + response = r.text + else: + module.fail_json( + msg="Invalid response from RESTAPI when trying to check if queue exists", + details=r.text + ) + + if module.params['state'] == 'present': + change_required = not queue_exists + else: + change_required = queue_exists + + # Check if attributes change on existing queue + if not change_required and r.status_code == 200 and module.params['state'] == 'present': + if not ( + response['durable'] == module.params['durable'] 
and + response['auto_delete'] == module.params['auto_delete'] and + ( + ('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or + ('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None) + ) and + ( + ('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or + ('x-expires' not in response['arguments'] and module.params['auto_expires'] is None) + ) and + ( + ('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or + ('x-max-length' not in response['arguments'] and module.params['max_length'] is None) + ) and + ( + ('x-dead-letter-exchange' in response['arguments'] and + response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or + ('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None) + ) and + ( + ('x-dead-letter-routing-key' in response['arguments'] and + response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or + ('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None) + ) and + ( + ('x-max-priority' in response['arguments'] and + response['arguments']['x-max-priority'] == module.params['max_priority']) or + ('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None) + ) + ): + module.fail_json( + msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues", + ) + + # Copy parameters to arguments as used by RabbitMQ + for k, v in { + 'message_ttl': 'x-message-ttl', + 'auto_expires': 'x-expires', + 'max_length': 'x-max-length', + 'dead_letter_exchange': 'x-dead-letter-exchange', + 'dead_letter_routing_key': 'x-dead-letter-routing-key', + 'max_priority': 'x-max-priority' + }.items(): + if module.params[k] is not None: + module.params['arguments'][v] = module.params[k] + + # Exit if check_mode + if module.check_mode: + result['changed'] = change_required + result['details'] = response + result['arguments'] = module.params['arguments'] + module.exit_json(**result) + + # Do changes + if change_required: + if module.params['state'] == 'present': + r = requests.put( + url, + auth=(module.params['login_user'], module.params['login_password']), + headers={"content-type": "application/json"}, + data=json.dumps({ + "durable": module.params['durable'], + "auto_delete": module.params['auto_delete'], + "arguments": module.params['arguments'] + }), + verify=module.params['ca_cert'], + cert=(module.params['client_cert'], module.params['client_key']) + ) + elif module.params['state'] == 'absent': + r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']), + verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key'])) + + # RabbitMQ 3.6.7 changed this response code from 204 to 201 + if r.status_code == 204 or r.status_code == 201: + result['changed'] = True + module.exit_json(**result) + else: + module.fail_json( + msg="Error creating queue", + status=r.status_code, + details=r.text + ) + + else: + module.exit_json( + changed=False, + name=module.params['name'] + ) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/s3_bucket.py b/test/support/integration/plugins/modules/s3_bucket.py new file mode 100644 index 00000000..f35cf53b --- /dev/null +++ 
b/test/support/integration/plugins/modules/s3_bucket.py @@ -0,0 +1,740 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: s3_bucket +short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID +description: + - Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID +version_added: "2.0" +requirements: [ boto3 ] +author: "Rob White (@wimnat)" +options: + force: + description: + - When trying to delete a bucket, delete all keys (including versions and delete markers) + in the bucket first (an s3 bucket must be empty for a successful deletion) + type: bool + default: 'no' + name: + description: + - Name of the s3 bucket + required: true + type: str + policy: + description: + - The JSON policy as a string. + type: json + s3_url: + description: + - S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and fakes3 etc. + - Assumes AWS if not specified. + - For Walrus, use FQDN of the endpoint without scheme nor path. + aliases: [ S3_URL ] + type: str + ceph: + description: + - Enable API compatibility with Ceph. It takes into account the S3 API subset working + with Ceph in order to provide the same module behaviour where possible. + type: bool + version_added: "2.2" + requester_pays: + description: + - With Requester Pays buckets, the requester instead of the bucket owner pays the cost + of the request and the data download from the bucket. + type: bool + default: False + state: + description: + - Create or remove the s3 bucket + required: false + default: present + choices: [ 'present', 'absent' ] + type: str + tags: + description: + - tags dict to apply to bucket + type: dict + purge_tags: + description: + - whether to remove tags that aren't present in the C(tags) parameter + type: bool + default: True + version_added: "2.9" + versioning: + description: + - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended) + type: bool + encryption: + description: + - Describes the default server-side encryption to apply to new objects in the bucket. + In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly. + choices: [ 'none', 'AES256', 'aws:kms' ] + version_added: "2.9" + type: str + encryption_key_id: + description: KMS master key ID to use for the default encryption. This parameter is allowed if encryption is aws:kms. If + not specified then it will default to the AWS provided KMS key. 
+ version_added: "2.9" + type: str +extends_documentation_fragment: + - aws + - ec2 +notes: + - If the C(requestPayment), C(policy), C(tagging) or C(versioning) + operations/API aren't implemented by the endpoint, the module doesn't fail + as long as the related parameters are left at their defaults: + I(requester_pays) is C(False) and I(policy), I(tags), and I(versioning) are C(None). +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Create a simple s3 bucket +- s3_bucket: + name: mys3bucket + state: present + +# Create a simple s3 bucket on Ceph Rados Gateway +- s3_bucket: + name: mys3bucket + s3_url: http://your-ceph-rados-gateway-server.xxx + ceph: true + +# Remove an s3 bucket and any keys it contains +- s3_bucket: + name: mys3bucket + state: absent + force: yes + +# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag +- s3_bucket: + name: mys3bucket + policy: "{{ lookup('file','policy.json') }}" + requester_pays: yes + versioning: yes + tags: + example: tag1 + another: tag2 + +# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint +- s3_bucket: + name: mydobucket + s3_url: 'https://nyc3.digitaloceanspaces.com' + +# Create a bucket with AES256 encryption +- s3_bucket: + name: mys3bucket + state: present + encryption: "AES256" + +# Create a bucket with aws:kms encryption, KMS key +- s3_bucket: + name: mys3bucket + state: present + encryption: "aws:kms" + encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example" + +# Create a bucket with aws:kms encryption, default key +- s3_bucket: + name: mys3bucket + state: present + encryption: "aws:kms" +''' + +import json +import os +import time + +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.six import string_types +from ansible.module_utils.basic import to_text +from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible.module_utils.ec2 import compare_policies, ec2_argument_spec, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list +from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry + +try: + from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError +except ImportError: + pass # handled by AnsibleAWSModule + + +def create_or_update_bucket(s3_client, module, location): + + policy = module.params.get("policy") + name = module.params.get("name") + requester_pays = module.params.get("requester_pays") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + versioning = module.params.get("versioning") + encryption = module.params.get("encryption") + encryption_key_id = module.params.get("encryption_key_id") + changed = False + result = {} + + try: + bucket_is_present = bucket_exists(s3_client, name) + except EndpointConnectionError as e: + module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to check bucket presence") + + if not bucket_is_present: + try: + bucket_changed = create_bucket(s3_client, name, location) + s3_client.get_waiter('bucket_exists').wait(Bucket=name) + changed = changed or bucket_changed + except WaiterError as e: + module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available') + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed while creating bucket") + + # Versioning 
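+ # Flow: read the current status, work out the required Enabled/Suspended transition
+ # when 'versioning' was requested, apply it, then poll until the API reflects it.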
+ try: + versioning_status = get_bucket_versioning(s3_client, name) + except BotoCoreError as exp: + module.fail_json_aws(exp, msg="Failed to get bucket versioning") + except ClientError as exp: + if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None: + module.fail_json_aws(exp, msg="Failed to get bucket versioning") + else: + if versioning is not None: + required_versioning = None + if versioning and versioning_status.get('Status') != "Enabled": + required_versioning = 'Enabled' + elif not versioning and versioning_status.get('Status') == "Enabled": + required_versioning = 'Suspended' + + if required_versioning: + try: + put_bucket_versioning(s3_client, name, required_versioning) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to update bucket versioning") + + versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning) + + # This output format is there to ensure compatibility with previous versions of the module + result['versioning'] = { + 'Versioning': versioning_status.get('Status', 'Disabled'), + 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'), + } + + # Requester pays + try: + requester_pays_status = get_bucket_request_payment(s3_client, name) + except BotoCoreError as exp: + module.fail_json_aws(exp, msg="Failed to get bucket request payment") + except ClientError as exp: + if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or requester_pays: + module.fail_json_aws(exp, msg="Failed to get bucket request payment") + else: + if requester_pays: + payer = 'Requester' if requester_pays else 'BucketOwner' + if requester_pays_status != payer: + put_bucket_request_payment(s3_client, name, payer) + requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False) + if requester_pays_status is None: + # The put request is quite often not taken into account on the first + # try, so we retry one more time + put_bucket_request_payment(s3_client, name, payer) + requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True) + changed = True + + result['requester_pays'] = requester_pays + + # Policy + try: + current_policy = get_bucket_policy(s3_client, name) + except BotoCoreError as exp: + module.fail_json_aws(exp, msg="Failed to get bucket policy") + except ClientError as exp: + if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None: + module.fail_json_aws(exp, msg="Failed to get bucket policy") + else: + if policy is not None: + if isinstance(policy, string_types): + policy = json.loads(policy) + + if not policy and current_policy: + try: + delete_bucket_policy(s3_client, name) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to delete bucket policy") + current_policy = wait_policy_is_applied(module, s3_client, name, policy) + changed = True + elif compare_policies(current_policy, policy): + try: + put_bucket_policy(s3_client, name, policy) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to update bucket policy") + current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False) + if current_policy is None: + # As for request payment, the put request is quite often not taken into + # account on the first try, so we retry one more time + put_bucket_policy(s3_client, name, policy) + current_policy = wait_policy_is_applied(module, s3_client, 
name, policy, should_fail=True) + changed = True + + result['policy'] = current_policy + + # Tags + try: + current_tags_dict = get_current_bucket_tags_dict(s3_client, name) + except BotoCoreError as exp: + module.fail_json_aws(exp, msg="Failed to get bucket tags") + except ClientError as exp: + if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or tags is not None: + module.fail_json_aws(exp, msg="Failed to get bucket tags") + else: + if tags is not None: + # Tags are always returned as text + tags = dict((to_text(k), to_text(v)) for k, v in tags.items()) + if not purge_tags: + # Ensure existing tags that aren't updated by the desired tags remain + current_copy = current_tags_dict.copy() + current_copy.update(tags) + tags = current_copy + if current_tags_dict != tags: + if tags: + try: + put_bucket_tagging(s3_client, name, tags) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to update bucket tags") + else: + if purge_tags: + try: + delete_bucket_tagging(s3_client, name) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to delete bucket tags") + current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags) + changed = True + + result['tags'] = current_tags_dict + + # Encryption + current_encryption = None # ensure the result key is well-defined even when botocore lacks get_bucket_encryption + if hasattr(s3_client, "get_bucket_encryption"): + try: + current_encryption = get_bucket_encryption(s3_client, name) + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to get bucket encryption") + elif encryption is not None: + module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41") + + if encryption is not None: + current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None + current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None + if encryption == 'none' and current_encryption_algorithm is not None: + try: + delete_bucket_encryption(s3_client, name) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to delete bucket encryption") + current_encryption = wait_encryption_is_applied(module, s3_client, name, None) + changed = True + elif (encryption != 'none' and encryption != current_encryption_algorithm) or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id): + expected_encryption = {'SSEAlgorithm': encryption} + if encryption == 'aws:kms' and encryption_key_id is not None: + expected_encryption.update({'KMSMasterKeyID': encryption_key_id}) + try: + put_bucket_encryption(s3_client, name, expected_encryption) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to set bucket encryption") + current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption) + changed = True + + result['encryption'] = current_encryption + + module.exit_json(changed=changed, name=name, **result) + + +def bucket_exists(s3_client, bucket_name): + # head_bucket appeared to be really inconsistent, so we use list_buckets instead, + # and loop over all the buckets, even if we know it's less performant :( + all_buckets = s3_client.list_buckets(Bucket=bucket_name)['Buckets'] + return any(bucket['Name'] == bucket_name for bucket in all_buckets) + + +@AWSRetry.exponential_backoff(max_delay=120) +def create_bucket(s3_client, bucket_name, location): + try: + configuration = {} + if location not in ('us-east-1', None): + configuration['LocationConstraint'] = location + if len(configuration) > 0: + 
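+ # Regions other than us-east-1 must be requested via an explicit LocationConstraint; us-east-1 is the API default and is deliberately omitted above.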
s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration) + else: + s3_client.create_bucket(Bucket=bucket_name) + return True + except ClientError as e: + error_code = e.response['Error']['Code'] + if error_code == 'BucketAlreadyOwnedByYou': + # We should never get here, since we check bucket presence before calling the create_or_update_bucket + # method. However, the AWS API sometimes fails to report bucket presence, so we catch this exception + return False + else: + raise e + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket']) +def put_bucket_tagging(s3_client, bucket_name, tags): + s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)}) + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket']) +def put_bucket_policy(s3_client, bucket_name, policy): + s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy)) + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket']) +def delete_bucket_policy(s3_client, bucket_name): + s3_client.delete_bucket_policy(Bucket=bucket_name) + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket']) +def get_bucket_policy(s3_client, bucket_name): + try: + current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy')) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchBucketPolicy': + current_policy = None + else: + raise e + return current_policy + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket']) +def put_bucket_request_payment(s3_client, bucket_name, payer): + s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer}) + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket']) +def get_bucket_request_payment(s3_client, bucket_name): + return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer') + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket']) +def get_bucket_versioning(s3_client, bucket_name): + return s3_client.get_bucket_versioning(Bucket=bucket_name) + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket']) +def put_bucket_versioning(s3_client, bucket_name, required_versioning): + s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning}) + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket']) +def get_bucket_encryption(s3_client, bucket_name): + try: + result = s3_client.get_bucket_encryption(Bucket=bucket_name) + return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault') + except ClientError as e: + if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError': + return None + else: + raise e + except (IndexError, KeyError): + return None + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket']) +def put_bucket_encryption(s3_client, bucket_name, encryption): + server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]} + s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration) + + +@AWSRetry.exponential_backoff(max_delay=120, 
catch_extra_error_codes=['NoSuchBucket']) +def delete_bucket_tagging(s3_client, bucket_name): + s3_client.delete_bucket_tagging(Bucket=bucket_name) + + +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket']) +def delete_bucket_encryption(s3_client, bucket_name): + s3_client.delete_bucket_encryption(Bucket=bucket_name) + + +@AWSRetry.exponential_backoff(max_delay=120) +def delete_bucket(s3_client, bucket_name): + try: + s3_client.delete_bucket(Bucket=bucket_name) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchBucket': + # This means the bucket should have been in a deleting state when we checked its existence. + # We just ignore the error + pass + else: + raise e + + +def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True): + for dummy in range(0, 12): + try: + current_policy = get_bucket_policy(s3_client, bucket_name) + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to get bucket policy") + + if compare_policies(current_policy, expected_policy): + time.sleep(5) + else: + return current_policy + if should_fail: + module.fail_json(msg="Bucket policy failed to apply in the expected time") + else: + return None + + +def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True): + for dummy in range(0, 12): + try: + requester_pays_status = get_bucket_request_payment(s3_client, bucket_name) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to get bucket request payment") + if requester_pays_status != expected_payer: + time.sleep(5) + else: + return requester_pays_status + if should_fail: + module.fail_json(msg="Bucket request payment failed to apply in the expected time") + else: + return None + + +def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption): + for dummy in range(0, 12): + try: + encryption = get_bucket_encryption(s3_client, bucket_name) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to get updated encryption for bucket") + if encryption != expected_encryption: + time.sleep(5) + else: + return encryption + module.fail_json(msg="Bucket encryption failed to apply in the expected time") + + +def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning): + for dummy in range(0, 24): + try: + versioning_status = get_bucket_versioning(s3_client, bucket_name) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to get updated versioning for bucket") + if versioning_status.get('Status') != required_versioning: + time.sleep(8) + else: + return versioning_status + module.fail_json(msg="Bucket versioning failed to apply in the expected time") + + +def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict): + for dummy in range(0, 12): + try: + current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name) + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to get bucket tags") + if current_tags_dict != expected_tags_dict: + time.sleep(5) + else: + return current_tags_dict + module.fail_json(msg="Bucket tags failed to apply in the expected time") + + +def get_current_bucket_tags_dict(s3_client, bucket_name): + try: + current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet') + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchTagSet': + return {} + raise e + + return 
boto3_tag_list_to_ansible_dict(current_tags) + + +def paginated_list(s3_client, **pagination_params): + pg = s3_client.get_paginator('list_objects_v2') + for page in pg.paginate(**pagination_params): + yield [data['Key'] for data in page.get('Contents', [])] + + +def paginated_versions_list(s3_client, **pagination_params): + try: + pg = s3_client.get_paginator('list_object_versions') + for page in pg.paginate(**pagination_params): + # We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion + yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))] + except is_boto3_error_code('NoSuchBucket'): + yield [] + + +def destroy_bucket(s3_client, module): + + force = module.params.get("force") + name = module.params.get("name") + try: + bucket_is_present = bucket_exists(s3_client, name) + except EndpointConnectionError as e: + module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to check bucket presence") + + if not bucket_is_present: + module.exit_json(changed=False) + + if force: + # if there are contents then we need to delete them (including versions) before we can delete the bucket + try: + for key_version_pairs in paginated_versions_list(s3_client, Bucket=name): + formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs] + for fk in formatted_keys: + # remove VersionId from cases where they are `None` so that + # unversioned objects are deleted using `DeleteObject` + # rather than `DeleteObjectVersion`, improving backwards + # compatibility with older IAM policies. + if not fk.get('VersionId'): + fk.pop('VersionId') + + if formatted_keys: + resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys}) + if resp.get('Errors'): + module.fail_json( + msg='Could not empty bucket before deleting. 
Could not delete objects: {0}'.format( + ', '.join([k['Key'] for k in resp['Errors']]) + ), + errors=resp['Errors'], response=resp + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed while deleting bucket") + + try: + delete_bucket(s3_client, name) + s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60)) + except WaiterError as e: + module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.') + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to delete bucket") + + module.exit_json(changed=True) + + +def is_fakes3(s3_url): + """ Return True if s3_url has scheme fakes3:// """ + if s3_url is not None: + return urlparse(s3_url).scheme in ('fakes3', 'fakes3s') + else: + return False + + +def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url): + if s3_url and ceph: # TODO - test this + ceph = urlparse(s3_url) + params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs) + elif is_fakes3(s3_url): + fakes3 = urlparse(s3_url) + port = fakes3.port + if fakes3.scheme == 'fakes3s': + protocol = "https" + if port is None: + port = 443 + else: + protocol = "http" + if port is None: + port = 80 + params = dict(module=module, conn_type='client', resource='s3', region=location, + endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), + use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) + else: + params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs) + return boto3_conn(**params) + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + force=dict(default=False, type='bool'), + policy=dict(type='json'), + name=dict(required=True), + requester_pays=dict(default=False, type='bool'), + s3_url=dict(aliases=['S3_URL']), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=True), + versioning=dict(type='bool'), + ceph=dict(default=False, type='bool'), + encryption=dict(choices=['none', 'AES256', 'aws:kms']), + encryption_key_id=dict() + ) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + ) + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + + if region in ('us-east-1', '', None): + # default to US Standard region + location = 'us-east-1' + else: + # Boto uses symbolic names for locations but region strings will + # actually work fine for everything except us-east-1 (US Standard) + location = region + + s3_url = module.params.get('s3_url') + ceph = module.params.get('ceph') + + # allow eucarc environment variables to be used if ansible vars aren't set + if not s3_url and 'S3_URL' in os.environ: + s3_url = os.environ['S3_URL'] + + if ceph and not s3_url: + module.fail_json(msg='ceph flavour requires s3_url') + + # Look at s3_url and tweak connection settings + # if connecting to Ceph RGW, Walrus or fakes3 + if s3_url: + for key in ['validate_certs', 'security_token', 'profile_name']: + aws_connect_kwargs.pop(key, None) + s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url) + + if s3_client is None: # this should never happen + module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.') + + state = module.params.get("state") + encryption = 
module.params.get("encryption") + encryption_key_id = module.params.get("encryption_key_id") + + # Parameter validation + if encryption_key_id is not None and encryption is None: + module.fail_json(msg="You must specify encryption parameter along with encryption_key_id.") + elif encryption_key_id is not None and encryption != 'aws:kms': + module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.") + + if state == 'present': + create_or_update_bucket(s3_client, module, location) + elif state == 'absent': + destroy_bucket(s3_client, module) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/sefcontext.py b/test/support/integration/plugins/modules/sefcontext.py new file mode 100644 index 00000000..33e3fd2e --- /dev/null +++ b/test/support/integration/plugins/modules/sefcontext.py @@ -0,0 +1,298 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: sefcontext +short_description: Manages SELinux file context mapping definitions +description: +- Manages SELinux file context mapping definitions. +- Similar to the C(semanage fcontext) command. +version_added: '2.2' +options: + target: + description: + - Target path (expression). + type: str + required: yes + aliases: [ path ] + ftype: + description: + - The file type that should have SELinux contexts applied. + - "The following file type options are available:" + - C(a) for all files, + - C(b) for block devices, + - C(c) for character devices, + - C(d) for directories, + - C(f) for regular files, + - C(l) for symbolic links, + - C(p) for named pipes, + - C(s) for socket files. + type: str + choices: [ a, b, c, d, f, l, p, s ] + default: a + setype: + description: + - SELinux type for the specified target. + type: str + required: yes + seuser: + description: + - SELinux user for the specified target. + type: str + selevel: + description: + - SELinux range for the specified target. + type: str + aliases: [ serange ] + state: + description: + - Whether the SELinux file context must be C(absent) or C(present). + type: str + choices: [ absent, present ] + default: present + reload: + description: + - Reload SELinux policy after commit. + - Note that this does not apply SELinux file contexts to existing files. + type: bool + default: yes + ignore_selinux_state: + description: + - Useful for scenarios (chrooted environment) that you can't get the real SELinux state. + type: bool + default: no + version_added: '2.8' +notes: +- The changes are persistent across reboots. +- The M(sefcontext) module does not modify existing files to the new + SELinux context(s), so it is advisable to first create the SELinux + file contexts before creating files, or run C(restorecon) manually + for the existing files that require the new SELinux file contexts. +- Not applying SELinux fcontexts to existing files is a deliberate + decision as it would be unclear what reported changes would entail + to, and there's no guarantee that applying SELinux fcontext does + not pick up other unrelated prior changes. 
+requirements: +- libselinux-python +- policycoreutils-python +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +- name: Allow apache to modify files in /srv/git_repos + sefcontext: + target: '/srv/git_repos(/.*)?' + setype: httpd_git_rw_content_t + state: present + +- name: Apply new SELinux file context to filesystem + command: restorecon -irv /srv/git_repos +''' + +RETURN = r''' +# Default return values +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + +# Add missing entries (backward compatible) +if HAVE_SEOBJECT: + seobject.file_types.update( + a=seobject.SEMANAGE_FCONTEXT_ALL, + b=seobject.SEMANAGE_FCONTEXT_BLOCK, + c=seobject.SEMANAGE_FCONTEXT_CHAR, + d=seobject.SEMANAGE_FCONTEXT_DIR, + f=seobject.SEMANAGE_FCONTEXT_REG, + l=seobject.SEMANAGE_FCONTEXT_LINK, + p=seobject.SEMANAGE_FCONTEXT_PIPE, + s=seobject.SEMANAGE_FCONTEXT_SOCK, + ) + +# Make backward compatible +option_to_file_type_str = dict( + a='all files', + b='block device', + c='character device', + d='directory', + f='regular file', + l='symbolic link', + p='named pipe', + s='socket', +) + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def semanage_fcontext_exists(sefcontext, target, ftype): + ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. ''' + + # Beware that records consist of a string representation of the file_type + record = (target, option_to_file_type_str[ftype]) + records = sefcontext.get_all() + try: + return records[record] + except KeyError: + return None + + +def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''): + ''' Add or modify SELinux file context mapping definition to the policy. 
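+ Looks up the existing record first: if one exists and setype/seuser/serange differ it is modified in place, otherwise a new record is added (seuser defaults to system_u, serange to s0).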
''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Modify existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if seuser is None: + seuser = orig_seuser + if serange is None: + serange = orig_serange + + if setype != orig_setype or seuser != orig_seuser or serange != orig_serange: + if not module.check_mode: + sefcontext.modify(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Change to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange) + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange) + else: + # Add missing entry + if seuser is None: + seuser = 'system_u' + if serange is None: + serange = 's0' + + if not module.check_mode: + sefcontext.add(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Addition to semanage file context mappings\n' + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, seuser=seuser, serange=serange, **result) + + +def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''): + ''' Delete SELinux file context mapping definition from the policy. ''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Remove existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if not module.check_mode: + sefcontext.delete(target, ftype) + changed = True + + if module._diff: + prepared_diff += '# Deletion to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3]) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, **result) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + target=dict(type='str', required=True, aliases=['path']), + ftype=dict(type='str', default='a', choices=option_to_file_type_str.keys()), + setype=dict(type='str', required=True), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange']), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + target = module.params['target'] + ftype = module.params['ftype'] + 
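+ # Note: 'selevel' is handed to seobject as the MLS/MCS range ('serange') below.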
setype = module.params['setype'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = dict(target=target, ftype=ftype, setype=setype, state=state) + + if state == 'present': + semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser) + elif state == 'absent': + semanage_fcontext_delete(module, result, target, ftype, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/selogin.py b/test/support/integration/plugins/modules/selogin.py new file mode 100644 index 00000000..6429ef36 --- /dev/null +++ b/test/support/integration/plugins/modules/selogin.py @@ -0,0 +1,260 @@ +#!/usr/bin/python + +# (c) 2017, Petr Lautrbach <plautrba@redhat.com> +# Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com> + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: selogin +short_description: Manages Linux user to SELinux user mapping +description: + - Manages Linux user to SELinux user mapping. +version_added: "2.8" +options: + login: + description: + - A Linux user. + required: true + seuser: + description: + - SELinux user name. + required: true + selevel: + aliases: [ serange ] + description: + - MLS/MCS security range (MLS/MCS systems only). The SELinux range for the SELinux login mapping; defaults to the SELinux user record range. + default: s0 + state: + description: + - Desired mapping value. + default: present + choices: [ 'present', 'absent' ] + reload: + description: + - Reload SELinux policy after commit. 
+ default: yes + ignore_selinux_state: + description: + - Run independently of the SELinux runtime state. + type: bool + default: false +notes: + - The changes are persistent across reboots. + - Not tested on any Debian-based system. +requirements: [ 'libselinux', 'policycoreutils' ] +author: +- Dan Keder (@dankeder) +- Petr Lautrbach (@bachradsusi) +- James Cassell (@jamescassell) +''' + +EXAMPLES = ''' +# Modify the default user on the system to the guest_u user +- selogin: + login: __default__ + seuser: guest_u + state: present + +# Assign the gijoe user on an MLS machine a range and map it to the staff_u user +- selogin: + login: gijoe + seuser: staff_u + serange: SystemLow-Secret + state: present + +# Assign all users in the engineering group to the staff_u user +- selogin: + login: '%engineering' + seuser: staff_u + state: present +''' + +RETURN = r''' +# Default return values +''' + + +import traceback + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''): + """ Add Linux user to SELinux user mapping + + :type module: AnsibleModule + :param module: Ansible module + + :type login: str + :param login: a Linux user or a Linux group if it begins with % + + :type seuser: str + :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' + + :type serange: str + :param serange: SELinux MLS/MCS range (defaults to 's0') + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + selogin = seobject.loginRecords(sestore) + selogin.set_reload(do_reload) + change = False + all_logins = selogin.get_all() + # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) + # for local_login in all_logins: + if login not in all_logins.keys(): + change = True + if not module.check_mode: + selogin.add(login, seuser, serange) + else: + if all_logins[login][0] != seuser or all_logins[login][1] != serange: + change = True + if not module.check_mode: + selogin.modify(login, seuser, serange) + + except (ValueError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def semanage_login_del(module, login, seuser, do_reload, sestore=''): + """ Delete Linux user to SELinux user mapping + + :type module: AnsibleModule + :param module: Ansible module + + :type login: str + :param login: a Linux user or a Linux group if it begins with % + + :type seuser: str + :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + selogin = seobject.loginRecords(sestore) + selogin.set_reload(do_reload) + change = False + all_logins = selogin.get_all() + # 
module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) + if login in all_logins.keys(): + change = True + if not module.check_mode: + selogin.delete(login) + + except (ValueError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + login=dict(type='str', required=True), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange'], default='s0'), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + required_if=[ + ["state", "present", ["seuser"]] + ], + supports_check_mode=True + ) + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + login = module.params['login'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = { + 'login': login, + 'seuser': seuser, + 'serange': serange, + 'state': state, + } + + if state == 'present': + result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange) + elif state == 'absent': + result['changed'] = semanage_login_del(module, login, seuser, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/synchronize.py b/test/support/integration/plugins/modules/synchronize.py new file mode 100644 index 00000000..e4c520b7 --- /dev/null +++ b/test/support/integration/plugins/modules/synchronize.py @@ -0,0 +1,618 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012-2013, Timothy Appnel <tim@appnel.com> +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: synchronize +version_added: "1.4" +short_description: A wrapper around rsync to make common tasks in your playbooks quick and easy +description: + - C(synchronize) is a wrapper around rsync to make common tasks in your playbooks quick and easy. + - It is run and originates on the local host where Ansible is being run. + - Of course, you could just use the C(command) action to call rsync yourself, but you also have to add a fair number of + boilerplate options and host facts. + - This module is not intended to provide access to the full power of rsync, but does make the most common + invocations easier to implement. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case. 
+options: + src: + description: + - Path on the source host that will be synchronized to the destination. + - The path can be absolute or relative. + type: str + required: true + dest: + description: + - Path on the destination host that will be synchronized from the source. + - The path can be absolute or relative. + type: str + required: true + dest_port: + description: + - Port number for ssh on the destination host. + - Prior to Ansible 2.0, the ansible_ssh_port inventory var took precedence over this value. + - This parameter defaults to the value of C(ansible_ssh_port) or C(ansible_port), + the C(remote_port) config setting or the value from ssh client configuration + if none of the former have been set. + type: int + version_added: "1.5" + mode: + description: + - Specify the direction of the synchronization. + - In push mode the localhost or delegate is the source. + - In pull mode the remote host in context is the source. + type: str + choices: [ pull, push ] + default: push + archive: + description: + - Mirrors the rsync archive flag; enables the recursive, links, perms, times, owner and group flags and -D. + type: bool + default: yes + checksum: + description: + - Skip based on checksum, rather than mod-time & size; note that the "archive" option is still enabled by default - the "checksum" option will + not disable it. + type: bool + default: no + version_added: "1.6" + compress: + description: + - Compress file data during the transfer. + - In most cases, leave this enabled unless it causes problems. + type: bool + default: yes + version_added: "1.7" + existing_only: + description: + - Skip creating new files on the receiver. + type: bool + default: no + version_added: "1.5" + delete: + description: + - Delete files in C(dest) that don't exist (after transfer, not before) in the C(src) path. + - This option requires C(recursive=yes). + - This option ignores excluded files and behaves like the rsync option C(--delete-excluded). + type: bool + default: no + dirs: + description: + - Transfer directories without recursing. + type: bool + default: no + recursive: + description: + - Recurse into directories. + - This parameter defaults to the value of the archive option. + type: bool + links: + description: + - Copy symlinks as symlinks. + - This parameter defaults to the value of the archive option. + type: bool + copy_links: + description: + - Copy symlinks as the items they point to; the referent is copied, rather than the symlink. + type: bool + default: no + perms: + description: + - Preserve permissions. + - This parameter defaults to the value of the archive option. + type: bool + times: + description: + - Preserve modification times. + - This parameter defaults to the value of the archive option. + type: bool + owner: + description: + - Preserve owner (super user only). + - This parameter defaults to the value of the archive option. + type: bool + group: + description: + - Preserve group. + - This parameter defaults to the value of the archive option. + type: bool + rsync_path: + description: + - Specify the rsync command to run on the remote host. See C(--rsync-path) on the rsync man page. + - To specify the rsync command to run on the local host, you need to set the task var C(ansible_rsync_path). + type: str + rsync_timeout: + description: + - Specify a C(--timeout) for the rsync command in seconds. + type: int + default: 0 + set_remote_user: + description: + - Put user@ for the remote paths. 
+ - If you have a custom ssh config to define the remote user for a host + that does not match the inventory user, you should set this parameter to C(no). + type: bool + default: yes + use_ssh_args: + description: + - Use the ssh_args specified in ansible.cfg. + type: bool + default: no + version_added: "2.0" + rsync_opts: + description: + - Specify additional rsync options by passing in an array. + - Note that an empty string in C(rsync_opts) will end up transferring the current working directory. + type: list + default: + version_added: "1.6" + partial: + description: + - Tells rsync to keep the partial file, which should make a subsequent transfer of the rest of the file much faster. + type: bool + default: no + version_added: "2.0" + verify_host: + description: + - Verify destination host key. + type: bool + default: no + version_added: "2.0" + private_key: + description: + - Specify the private key to use for SSH-based rsync connections (e.g. C(~/.ssh/id_rsa)). + type: path + version_added: "1.6" + link_dest: + description: + - Add a destination to hard link against during the rsync. + type: list + default: + version_added: "2.5" +notes: + - rsync must be installed on both the local and remote host. + - For the C(synchronize) module, the "local host" is the host `the synchronize task originates on`, and the "destination host" is the host + `synchronize is connecting to`. + - The "local host" can be changed to a different host by using `delegate_to`. This enables copying between two remote hosts or entirely on one + remote machine. + - > + The user and permissions for the synchronize `src` are those of the user running the Ansible task on the local host (or the remote_user for a + delegate_to host when delegate_to is used). + - The user and permissions for the synchronize `dest` are those of the `remote_user` on the destination host or the `become_user` if `become=yes` is active. + - In Ansible 2.0 a bug in the synchronize module made become occur on the "local host". This was fixed in Ansible 2.0.1. + - Currently, synchronize is limited to elevating permissions via passwordless sudo. This is because rsync itself is connecting to the remote machine + and rsync doesn't give us a way to pass sudo credentials in. + - Currently there are only a few connection types which support synchronize (ssh, paramiko, local, and docker) because a sync strategy has been + determined for those connection types. Note that the connection for these must not need a password as rsync itself is making the connection and + rsync does not provide us a way to pass a password to the connection. + - Expect that dest=~/x will be ~<remote_user>/x even if using sudo. + - Inspect the verbose output to validate the destination user/host/path are what was expected. + - To exclude files and directories from being synchronized, you may add C(.rsync-filter) files to the source directory. + - The rsync daemon must be up and running with the correct permissions when using the rsync protocol in the source or destination path. + - The C(synchronize) module forces `--delay-updates` to avoid leaving a destination in a broken in-between state if the underlying rsync process + encounters an error. Those synchronizing large numbers of files that are willing to trade safety for performance should call rsync directly. + - C(link_dest) is subject to the same limitations as the underlying rsync daemon. Hard links are only preserved if the relative subtrees + of the source and destination are the same. 
Attempts to hardlink into a directory that is a subdirectory of the source will be prevented. +seealso: +- module: copy +- module: win_robocopy +author: +- Timothy Appnel (@tima) +''' + +EXAMPLES = ''' +- name: Synchronization of src on the control machine to dest on the remote hosts + synchronize: + src: some/relative/path + dest: /some/absolute/path + +- name: Synchronization using rsync protocol (push) + synchronize: + src: some/relative/path/ + dest: rsync://somehost.com/path/ + +- name: Synchronization using rsync protocol (pull) + synchronize: + mode: pull + src: rsync://somehost.com/path/ + dest: /some/absolute/path/ + +- name: Synchronization using rsync protocol on delegate host (push) + synchronize: + src: /some/absolute/path/ + dest: rsync://somehost.com/path/ + delegate_to: delegate.host + +- name: Synchronization using rsync protocol on delegate host (pull) + synchronize: + mode: pull + src: rsync://somehost.com/path/ + dest: /some/absolute/path/ + delegate_to: delegate.host + +- name: Synchronization without any --archive options enabled + synchronize: + src: some/relative/path + dest: /some/absolute/path + archive: no + +- name: Synchronization with --archive options enabled except for --recursive + synchronize: + src: some/relative/path + dest: /some/absolute/path + recursive: no + +- name: Synchronization with --archive options enabled except for --times, with --checksum option enabled + synchronize: + src: some/relative/path + dest: /some/absolute/path + checksum: yes + times: no + +- name: Synchronization without --archive options enabled except use --links + synchronize: + src: some/relative/path + dest: /some/absolute/path + archive: no + links: yes + +- name: Synchronization of two paths both on the control machine + synchronize: + src: some/relative/path + dest: /some/absolute/path + delegate_to: localhost + +- name: Synchronization of src on the inventory host to the dest on the localhost in pull mode + synchronize: + mode: pull + src: some/relative/path + dest: /some/absolute/path + +- name: Synchronization of src on delegate host to dest on the current inventory host. + synchronize: + src: /first/absolute/path + dest: /second/absolute/path + delegate_to: delegate.host + +- name: Synchronize two directories on one remote host. + synchronize: + src: /first/absolute/path + dest: /second/absolute/path + delegate_to: "{{ inventory_hostname }}" + +- name: Synchronize and delete files in dest on the remote host that are not found in src of localhost. 
+ synchronize: + src: some/relative/path + dest: /some/absolute/path + delete: yes + recursive: yes + +# This specific command is granted su privileges on the destination +- name: Synchronize using an alternate rsync command + synchronize: + src: some/relative/path + dest: /some/absolute/path + rsync_path: su -c rsync + +# Example .rsync-filter file in the source directory +# - var # exclude any path whose last part is 'var' +# - /var # exclude any path starting with 'var' starting at the source directory +# + /var/conf # include /var/conf even though it was previously excluded + +- name: Synchronize passing in extra rsync options + synchronize: + src: /tmp/helloworld + dest: /var/www/helloworld + rsync_opts: + - "--no-motd" + - "--exclude=.git" + +# Hardlink files if they didn't change +- name: Use hardlinks when synchronizing filesystems + synchronize: + src: /tmp/path_a/foo.txt + dest: /tmp/path_b/foo.txt + link_dest: /tmp/path_a/ + +# Specify the rsync binary to use on remote host and on local host +- hosts: groupofhosts + vars: + ansible_rsync_path: /usr/gnu/bin/rsync + + tasks: + - name: copy /tmp/localpath/ to remote location /tmp/remotepath + synchronize: + src: /tmp/localpath/ + dest: /tmp/remotepath + rsync_path: /usr/gnu/bin/rsync +''' + + +import os +import errno + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.six.moves import shlex_quote + + +client_addr = None + + +def substitute_controller(path): + global client_addr + if not client_addr: + ssh_env_string = os.environ.get('SSH_CLIENT', None) + try: + client_addr, _ = ssh_env_string.split(None, 1) + except AttributeError: + ssh_env_string = os.environ.get('SSH_CONNECTION', None) + try: + client_addr, _ = ssh_env_string.split(None, 1) + except AttributeError: + pass + if not client_addr: + raise ValueError + + if path.startswith('localhost:'): + path = path.replace('localhost', client_addr, 1) + return path + + +def is_rsh_needed(source, dest): + if source.startswith('rsync://') or dest.startswith('rsync://'): + return False + if ':' in source or ':' in dest: + return True + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + src=dict(type='str', required=True), + dest=dict(type='str', required=True), + dest_port=dict(type='int'), + delete=dict(type='bool', default=False), + private_key=dict(type='path'), + rsync_path=dict(type='str'), + _local_rsync_path=dict(type='path', default='rsync'), + _local_rsync_password=dict(type='str', no_log=True), + _substitute_controller=dict(type='bool', default=False), + archive=dict(type='bool', default=True), + checksum=dict(type='bool', default=False), + compress=dict(type='bool', default=True), + existing_only=dict(type='bool', default=False), + dirs=dict(type='bool', default=False), + recursive=dict(type='bool'), + links=dict(type='bool'), + copy_links=dict(type='bool', default=False), + perms=dict(type='bool'), + times=dict(type='bool'), + owner=dict(type='bool'), + group=dict(type='bool'), + set_remote_user=dict(type='bool', default=True), + rsync_timeout=dict(type='int', default=0), + rsync_opts=dict(type='list', default=[]), + ssh_args=dict(type='str'), + partial=dict(type='bool', default=False), + verify_host=dict(type='bool', default=False), + mode=dict(type='str', default='push', choices=['pull', 'push']), + link_dest=dict(type='list') + ), + supports_check_mode=True, + ) + + if module.params['_substitute_controller']: + try: + source = 
substitute_controller(module.params['src']) + dest = substitute_controller(module.params['dest']) + except ValueError: + module.fail_json(msg='Could not determine controller hostname for rsync to send to') + else: + source = module.params['src'] + dest = module.params['dest'] + dest_port = module.params['dest_port'] + delete = module.params['delete'] + private_key = module.params['private_key'] + rsync_path = module.params['rsync_path'] + rsync = module.params.get('_local_rsync_path', 'rsync') + rsync_password = module.params.get('_local_rsync_password') + rsync_timeout = module.params['rsync_timeout'] + archive = module.params['archive'] + checksum = module.params['checksum'] + compress = module.params['compress'] + existing_only = module.params['existing_only'] + dirs = module.params['dirs'] + partial = module.params['partial'] + # the default of these params depends on the value of archive + recursive = module.params['recursive'] + links = module.params['links'] + copy_links = module.params['copy_links'] + perms = module.params['perms'] + times = module.params['times'] + owner = module.params['owner'] + group = module.params['group'] + rsync_opts = module.params['rsync_opts'] + ssh_args = module.params['ssh_args'] + verify_host = module.params['verify_host'] + link_dest = module.params['link_dest'] + + if '/' not in rsync: + rsync = module.get_bin_path(rsync, required=True) + + cmd = [rsync, '--delay-updates', '-F'] + _sshpass_pipe = None + if rsync_password: + try: + module.run_command(["sshpass"]) + except OSError: + module.fail_json( + msg="To use rsync connections with passwords, you must install the sshpass program" + ) + _sshpass_pipe = os.pipe() + cmd = ['sshpass', '-d' + to_native(_sshpass_pipe[0], errors='surrogate_or_strict')] + cmd + if compress: + cmd.append('--compress') + if rsync_timeout: + cmd.append('--timeout=%s' % rsync_timeout) + if module.check_mode: + cmd.append('--dry-run') + if delete: + cmd.append('--delete-after') + if existing_only: + cmd.append('--existing') + if checksum: + cmd.append('--checksum') + if copy_links: + cmd.append('--copy-links') + if archive: + cmd.append('--archive') + if recursive is False: + cmd.append('--no-recursive') + if links is False: + cmd.append('--no-links') + if perms is False: + cmd.append('--no-perms') + if times is False: + cmd.append('--no-times') + if owner is False: + cmd.append('--no-owner') + if group is False: + cmd.append('--no-group') + else: + if recursive is True: + cmd.append('--recursive') + if links is True: + cmd.append('--links') + if perms is True: + cmd.append('--perms') + if times is True: + cmd.append('--times') + if owner is True: + cmd.append('--owner') + if group is True: + cmd.append('--group') + if dirs: + cmd.append('--dirs') + + if source.startswith('rsync://') and dest.startswith('rsync://'): + module.fail_json(msg='either src or dest must be a localhost', rc=1) + + if is_rsh_needed(source, dest): + + # https://github.com/ansible/ansible/issues/15907 + has_rsh = False + for rsync_opt in rsync_opts: + if '--rsh' in rsync_opt: + has_rsh = True + break + + # if the user has not supplied an --rsh option go ahead and add ours + if not has_rsh: + ssh_cmd = [module.get_bin_path('ssh', required=True), '-S', 'none'] + if private_key is not None: + ssh_cmd.extend(['-i', private_key]) + # If the user specified a port value + # Note: The action plugin takes care of setting this to a port from + # inventory if the user didn't specify an explicit dest_port + if dest_port is not None: + 
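+ # Pass it as an ssh -o option so it applies to the transport connection rsync opens.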
ssh_cmd.extend(['-o', 'Port=%s' % dest_port]) + if not verify_host: + ssh_cmd.extend(['-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null']) + ssh_cmd_str = ' '.join(shlex_quote(arg) for arg in ssh_cmd) + if ssh_args: + ssh_cmd_str += ' %s' % ssh_args + cmd.append('--rsh=%s' % ssh_cmd_str) + + if rsync_path: + cmd.append('--rsync-path=%s' % rsync_path) + + if rsync_opts: + if '' in rsync_opts: + module.warn('The empty string is present in rsync_opts which will cause rsync to' + ' transfer the current working directory. If this is intended, use "."' + ' instead to get rid of this warning. If this is unintended, check for' + ' problems in your playbook leading to empty string in rsync_opts.') + cmd.extend(rsync_opts) + + if partial: + cmd.append('--partial') + + if link_dest: + cmd.append('-H') + # verbose required because rsync does not believe that adding a + # hardlink is actually a change + cmd.append('-vv') + for x in link_dest: + link_path = os.path.abspath(os.path.expanduser(x)) + destination_path = os.path.abspath(os.path.dirname(dest)) + if destination_path.find(link_path) == 0: + module.fail_json(msg='Hardlinking into a subdirectory of the source would cause recursion. %s and %s' % (destination_path, dest)) + cmd.append('--link-dest=%s' % link_path) + + changed_marker = '<<CHANGED>>' + cmd.append('--out-format=' + changed_marker + '%i %n%L') + + # expand the paths + if '@' not in source: + source = os.path.expanduser(source) + if '@' not in dest: + dest = os.path.expanduser(dest) + + cmd.append(source) + cmd.append(dest) + cmdstr = ' '.join(cmd) + + # If we are using password authentication, write the password into the pipe + if rsync_password: + def _write_password_to_pipe(proc): + os.close(_sshpass_pipe[0]) + try: + os.write(_sshpass_pipe[1], to_bytes(rsync_password) + b'\n') + except OSError as exc: + # Ignore broken pipe errors if the sshpass process has exited. 
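+                # sshpass is started with '-d<read fd>' above, so it reads the
+                # password from the read end of _sshpass_pipe while we write it
+                # into the write end here. If sshpass has already read the
+                # password and exited, the write may fail with EPIPE, which is
+                # harmless; any other error, or EPIPE while the process is
+                # still running, is re-raised below.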
+                if exc.errno != errno.EPIPE or proc.poll() is None:
+                    raise
+
+        (rc, out, err) = module.run_command(
+            cmd, pass_fds=_sshpass_pipe,
+            before_communicate_callback=_write_password_to_pipe)
+    else:
+        (rc, out, err) = module.run_command(cmd)
+
+    if rc:
+        return module.fail_json(msg=err, rc=rc, cmd=cmdstr)
+
+    if link_dest:
+        # a leading period indicates no change
+        changed = (changed_marker + '.') not in out
+    else:
+        changed = changed_marker in out
+
+    out_clean = out.replace(changed_marker, '')
+    out_lines = out_clean.split('\n')
+    while '' in out_lines:
+        out_lines.remove('')
+    if module._diff:
+        diff = {'prepared': out_clean}
+        return module.exit_json(changed=changed, msg=out_clean,
+                                rc=rc, cmd=cmdstr, stdout_lines=out_lines,
+                                diff=diff)
+
+    return module.exit_json(changed=changed, msg=out_clean,
+                            rc=rc, cmd=cmdstr, stdout_lines=out_lines)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/timezone.py b/test/support/integration/plugins/modules/timezone.py
new file mode 100644
index 00000000..b7439a12
--- /dev/null
+++ b/test/support/integration/plugins/modules/timezone.py
@@ -0,0 +1,909 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Shinichi TAMURA (@tmshn)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: timezone
+short_description: Configure timezone setting
+description:
+  - This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up NTP, use the M(service) module.
+  - It is recommended to restart C(crond) after changing the timezone, otherwise its jobs may run at the wrong time.
+  - Several different tools are used depending on the OS/Distribution involved.
+    For Linux, it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock).
+    On SmartOS, C(sm-set-timezone) is used; on macOS, C(systemsetup); on BSD, C(/etc/localtime) is modified.
+    On AIX, C(chtz) is used.
+  - As of Ansible 2.3 support was added for SmartOS and BSDs.
+  - As of Ansible 2.4 support was added for macOS.
+  - As of Ansible 2.9 support was added for AIX 6.1+.
+  - Windows and HPUX are not supported; please let us know if you find any other OS/distro in which this fails.
+version_added: "2.2"
+options:
+  name:
+    description:
+      - Name of the timezone for the system clock.
+      - Default is to keep current setting.
+      - B(At least one of name and hwclock is required.)
+    type: str
+  hwclock:
+    description:
+      - Whether the hardware clock is in UTC or in local timezone.
+      - Default is to keep current setting.
+      - Note that changing this option is not recommended, and that it may fail
+        to configure, especially on virtual environments such as AWS.
+      - B(At least one of name and hwclock is required.)
+      - I(Only used on Linux.)
+    type: str
+    aliases: [ rtc ]
+    choices: [ local, UTC ]
+notes:
+  - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone's timezone.
+  - On AIX only Olson/tz database timezones are usable (POSIX is not supported).
+  - An OS reboot is also required on AIX for the new timezone setting to take effect.
+author:
+  - Shinichi TAMURA (@tmshn)
+  - Jasper Lievisse Adriaanse (@jasperla)
+  - Indrajit Raychaudhuri (@indrajitr)
+'''
+
+RETURN = r'''
+diff:
+  description: The differences in the given arguments, before and after the change.
+  returned: success
+  type: complex
+  contains:
+    before:
+      description: The values before the change.
+      type: dict
+    after:
+      description: The values after the change.
+      type: dict
+'''
+
+EXAMPLES = r'''
+- name: Set timezone to Asia/Tokyo
+  timezone:
+    name: Asia/Tokyo
+'''
+
+import errno
+import os
+import platform
+import random
+import re
+import string
+import filecmp
+
+from ansible.module_utils.basic import AnsibleModule, get_distribution
+from ansible.module_utils.six import iteritems
+
+
+class Timezone(object):
+    """This is a generic Timezone manipulation class that is subclassed based on platform.
+
+    A subclass may wish to override the following action methods:
+        - get(key, phase) ... get the value from the system at `phase`
+        - set(key, value) ... set the value to the current system
+    """
+
+    def __new__(cls, module):
+        """Return the platform-specific subclass.
+
+        It does not use load_platform_subclass() because it needs to judge based
+        on whether the `timedatectl` command exists and is available.
+
+        Args:
+            module: The AnsibleModule.
+        """
+        if platform.system() == 'Linux':
+            timedatectl = module.get_bin_path('timedatectl')
+            if timedatectl is not None:
+                rc, stdout, stderr = module.run_command(timedatectl)
+                if rc == 0:
+                    return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
+                else:
+                    module.warn('timedatectl command was found but not usable: %s. Using other method.' % stderr)
+                    return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+            else:
+                return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+        elif re.match('^joyent_.*Z', platform.version()):
+            # platform.system() returns SunOS, which is too broad. So look at the
+            # platform version instead. However we have to ensure that we're not
+            # running in the global zone where changing the timezone has no effect.
+            zonename_cmd = module.get_bin_path('zonename')
+            if zonename_cmd is not None:
+                (rc, stdout, _) = module.run_command(zonename_cmd)
+                if rc == 0 and stdout.strip() == 'global':
+                    module.fail_json(msg='Adjusting timezone is not supported in Global Zone')
+
+            return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone)
+        elif platform.system() == 'Darwin':
+            return super(Timezone, DarwinTimezone).__new__(DarwinTimezone)
+        elif re.match('^(Free|Net|Open)BSD', platform.platform()):
+            return super(Timezone, BSDTimezone).__new__(BSDTimezone)
+        elif platform.system() == 'AIX':
+            AIXoslevel = int(platform.version() + platform.release())
+            if AIXoslevel >= 61:
+                return super(Timezone, AIXTimezone).__new__(AIXTimezone)
+            else:
+                module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel)
+        else:
+            # Not supported yet
+            return super(Timezone, Timezone).__new__(Timezone)
+
+    def __init__(self, module):
+        """Initialize the class.
+
+        Args:
+            module: The AnsibleModule.
+        """
+        super(Timezone, self).__init__()
+        self.msg = []
+        # `self.value` holds the values for each param in each phase.
+        # Initially there's only info for the "planned" phase, but the
+        # `self.check()` function will fill it out.
+        self.value = dict()
+        for key in module.argument_spec:
+            value = module.params[key]
+            if value is not None:
+                self.value[key] = dict(planned=value)
+        self.module = module
+
+    def abort(self, msg):
+        """Abort the process with an error message.
+
+        This is just a wrapper around module.fail_json().
+
+        Args:
+            msg: The error message.
+        """
+        error_msg = ['Error message:', msg]
+        if len(self.msg) > 0:
+            error_msg.append('Other message(s):')
+            error_msg.extend(self.msg)
+        self.module.fail_json(msg='\n'.join(error_msg))
+
+    def execute(self, *commands, **kwargs):
+        """Execute the shell command.
+
+        This is just a wrapper around module.run_command().
+
+        Args:
+            *commands: The command to execute.
+                It will be concatenated with a single space.
+            **kwargs: Only the 'log' key is checked.
+                If kwargs['log'] is true, record the command to self.msg.
+
+        Returns:
+            stdout: Standard output of the command.
+        """
+        command = ' '.join(commands)
+        (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
+        if kwargs.get('log', False):
+            self.msg.append('executed `%s`' % command)
+        return stdout
+
+    def diff(self, phase1='before', phase2='after'):
+        """Calculate the difference between the two given phases.
+
+        Args:
+            phase1, phase2: The names of the phases to compare.
+
+        Returns:
+            diff: The difference in values between phase1 and phase2.
+                This is in the format which can be used with the
+                `--diff` option of ansible-playbook.
+        """
+        diff = {phase1: {}, phase2: {}}
+        for key, value in iteritems(self.value):
+            diff[phase1][key] = value[phase1]
+            diff[phase2][key] = value[phase2]
+        return diff
+
+    def check(self, phase):
+        """Check the state in the given phase and set it to `self.value`.
+
+        Args:
+            phase: The name of the phase to check.
+
+        Returns:
+            NO RETURN VALUE
+        """
+        if phase == 'planned':
+            return
+        for key, value in iteritems(self.value):
+            value[phase] = self.get(key, phase)
+
+    def change(self):
+        """Make the changes take effect based on `self.value`."""
+        for key, value in iteritems(self.value):
+            if value['before'] != value['planned']:
+                self.set(key, value['planned'])
+
+    # ===========================================
+    # Platform specific methods (must be replaced by subclass).
+
+    def get(self, key, phase):
+        """Get the value for the key at the given phase.
+
+        Called from self.check().
+
+        Args:
+            key: The key to get the value for.
+            phase: The phase to get the value at.
+
+        Return:
+            value: The value for the key at the given phase.
+        """
+        self.abort('get(key, phase) is not implemented on target platform')
+
+    def set(self, key, value):
+        """Set the value for the key (of course, for the phase 'after').
+
+        Called from self.change().
+
+        Args:
+            key: Key to set the value for.
+            value: Value to set.
+        """
+        self.abort('set(key, value) is not implemented on target platform')
+
+    def _verify_timezone(self):
+        tz = self.value['name']['planned']
+        tzfile = '/usr/share/zoneinfo/%s' % tz
+        if not os.path.isfile(tzfile):
+            self.abort('given timezone "%s" is not available' % tz)
+        return tzfile
+
+
+class SystemdTimezone(Timezone):
+    """This is a Timezone manipulation class for systemd-powered Linux.
+
+    It uses the `timedatectl` command to check/set all arguments.
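+
+    For illustration, the regexps below are written against `timedatectl
+    status` output of roughly this shape (the exact labels vary between
+    systemd versions, hence the permissive patterns):
+
+        Time zone: Asia/Tokyo (JST, +0900)
+        RTC in local TZ: no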
+    """
+
+    regexps = dict(
+        hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
+        name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
+    )
+
+    subcmds = dict(
+        hwclock='set-local-rtc',
+        name='set-timezone'
+    )
+
+    def __init__(self, module):
+        super(SystemdTimezone, self).__init__(module)
+        self.timedatectl = module.get_bin_path('timedatectl', required=True)
+        self.status = dict()
+        # Validate given timezone
+        if 'name' in self.value:
+            self._verify_timezone()
+
+    def _get_status(self, phase):
+        if phase not in self.status:
+            self.status[phase] = self.execute(self.timedatectl, 'status')
+        return self.status[phase]
+
+    def get(self, key, phase):
+        status = self._get_status(phase)
+        value = self.regexps[key].search(status).group(1)
+        if key == 'hwclock':
+            # For key='hwclock'; convert yes/no -> local/UTC
+            if self.module.boolean(value):
+                value = 'local'
+            else:
+                value = 'UTC'
+        return value
+
+    def set(self, key, value):
+        # For key='hwclock'; convert UTC/local -> yes/no
+        if key == 'hwclock':
+            if value == 'local':
+                value = 'yes'
+            else:
+                value = 'no'
+        self.execute(self.timedatectl, self.subcmds[key], value, log=True)
+
+
+class NosystemdTimezone(Timezone):
+    """This is a Timezone manipulation class for non-systemd-powered Linux.
+
+    For timezone setting, it edits one of the following files and reflects the changes:
+        - /etc/sysconfig/clock ... RHEL/CentOS
+        - /etc/timezone ... Debian/Ubuntu
+    For hwclock setting, it executes the `hwclock --systohc` command with the
+    '--utc' or '--localtime' option.
+    """
+
+    conf_files = dict(
+        name=None,  # To be set in __init__
+        hwclock=None,  # To be set in __init__
+        adjtime='/etc/adjtime'
+    )
+
+    # It's fine if all three config files don't exist
+    allow_no_file = dict(
+        name=True,
+        hwclock=True,
+        adjtime=True
+    )
+
+    regexps = dict(
+        name=None,  # To be set in __init__
+        hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
+        adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
+    )
+
+    dist_regexps = dict(
+        SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE),
+        redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
+    )
+
+    dist_tzline_format = dict(
+        SuSE='TIMEZONE="%s"\n',
+        redhat='ZONE="%s"\n'
+    )
+
+    def __init__(self, module):
+        super(NosystemdTimezone, self).__init__(module)
+        # Validate given timezone
+        if 'name' in self.value:
+            tzfile = self._verify_timezone()
+            # `--remove-destination` is needed if /etc/localtime is a symlink so
+            # that it overwrites it instead of following it.
+            self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)]
+        self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+        # Distribution-specific configurations
+        if self.module.get_bin_path('dpkg-reconfigure') is not None:
+            # Debian/Ubuntu
+            if 'name' in self.value:
+                self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile),
+                                        '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)]
+            self.conf_files['name'] = '/etc/timezone'
+            self.conf_files['hwclock'] = '/etc/default/rcS'
+            self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
+            self.tzline_format = '%s\n'
+        else:
+            # RHEL/CentOS/SUSE
+            if self.module.get_bin_path('tzdata-update') is not None:
+                # tzdata-update cannot update the timezone if /etc/localtime is
+                # a symlink so we have to use cp to update the time zone which
+                # was set above.
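+                # In other words, tzdata-update is only used when /etc/localtime
+                # is not a symlink; otherwise the 'cp --remove-destination'
+                # command configured above is kept.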
+                if not os.path.islink('/etc/localtime'):
+                    self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)]
+                # else:
+                #   self.update_timezone = 'cp --remove-destination ...' <- configured above
+            self.conf_files['name'] = '/etc/sysconfig/clock'
+            self.conf_files['hwclock'] = '/etc/sysconfig/clock'
+            try:
+                f = open(self.conf_files['name'], 'r')
+            except IOError as err:
+                if self._allow_ioerror(err, 'name'):
+                    # If the config file doesn't exist, detect the distribution and set regexps.
+                    distribution = get_distribution()
+                    if distribution == 'SuSE':
+                        # For SUSE
+                        self.regexps['name'] = self.dist_regexps['SuSE']
+                        self.tzline_format = self.dist_tzline_format['SuSE']
+                    else:
+                        # For RHEL/CentOS
+                        self.regexps['name'] = self.dist_regexps['redhat']
+                        self.tzline_format = self.dist_tzline_format['redhat']
+                else:
+                    self.abort('could not read configuration file "%s"' % self.conf_files['name'])
+            else:
+                # The key for timezone might be `ZONE` or `TIMEZONE`
+                # (the former is used in RHEL/CentOS and the latter is used in SUSE Linux).
+                # So check the content of /etc/sysconfig/clock and decide which key to use.
+                sysconfig_clock = f.read()
+                f.close()
+                if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
+                    # For SUSE
+                    self.regexps['name'] = self.dist_regexps['SuSE']
+                    self.tzline_format = self.dist_tzline_format['SuSE']
+                else:
+                    # For RHEL/CentOS
+                    self.regexps['name'] = self.dist_regexps['redhat']
+                    self.tzline_format = self.dist_tzline_format['redhat']
+
+    def _allow_ioerror(self, err, key):
+        # In some cases, even if the target file does not exist,
+        # simply creating it may solve the problem.
+        # In such cases, we should continue the configuration rather than aborting.
+        if err.errno != errno.ENOENT:
+            # If the error is not ENOENT ("No such file or directory")
+            # (e.g., a permission error), we should abort.
+            return False
+        return self.allow_no_file.get(key, False)
+
+    def _edit_file(self, filename, regexp, value, key):
+        """Replace the first matched line with the given `value`.
+
+        If `regexp` matches more than once, all matched lines other than
+        the first one will be deleted.
+
+        Args:
+            filename: The name of the file to edit.
+            regexp: The regular expression to search with.
+            value: The line which will be inserted.
+            key: For what key the file is being edited.
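+
+        Illustrative example: with the redhat regexp (which matches lines
+        starting with ZONE=) and the value 'ZONE="Asia/Tokyo"', a file
+        containing the two lines
+
+            ZONE="UTC"
+            ZONE="US/Eastern"
+
+        ends up containing the single line
+
+            ZONE="Asia/Tokyo"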
+        """
+        # Read the file
+        try:
+            file = open(filename, 'r')
+        except IOError as err:
+            if self._allow_ioerror(err, key):
+                lines = []
+            else:
+                self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+        else:
+            lines = file.readlines()
+            file.close()
+        # Find all matched lines
+        matched_indices = []
+        for i, line in enumerate(lines):
+            if regexp.search(line):
+                matched_indices.append(i)
+        if len(matched_indices) > 0:
+            insert_line = matched_indices[0]
+        else:
+            insert_line = 0
+        # Remove all matched lines
+        for i in matched_indices[::-1]:
+            del lines[i]
+        # ...and insert the value
+        lines.insert(insert_line, value)
+        # Write the changes
+        try:
+            file = open(filename, 'w')
+        except IOError:
+            self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename))
+        else:
+            file.writelines(lines)
+            file.close()
+        self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+
+    def _get_value_from_config(self, key, phase):
+        filename = self.conf_files[key]
+        try:
+            file = open(filename, mode='r')
+        except IOError as err:
+            if self._allow_ioerror(err, key):
+                if key == 'hwclock':
+                    return 'n/a'
+                elif key == 'adjtime':
+                    return 'UTC'
+                elif key == 'name':
+                    return 'n/a'
+            else:
+                self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+        else:
+            status = file.read()
+            file.close()
+            try:
+                value = self.regexps[key].search(status).group(1)
+            except AttributeError:
+                if key == 'hwclock':
+                    # If we cannot find UTC in the config that's fine.
+                    return 'n/a'
+                elif key == 'adjtime':
+                    # If we cannot find UTC/LOCAL in /etc/adjtime, that means UTC
+                    # will be used by default.
+                    return 'UTC'
+                elif key == 'name':
+                    if phase == 'before':
+                        # In the 'before' phase the timezone name doesn't need to
+                        # be set in the config file, so we ignore this error.
+                        return 'n/a'
+                    else:
+                        self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename))
+            else:
+                if key == 'hwclock':
+                    # convert yes/no -> UTC/local
+                    if self.module.boolean(value):
+                        value = 'UTC'
+                    else:
+                        value = 'local'
+                elif key == 'adjtime':
+                    # convert LOCAL -> local
+                    if value != 'UTC':
+                        value = value.lower()
+            return value
+
+    def get(self, key, phase):
+        planned = self.value[key]['planned']
+        if key == 'hwclock':
+            value = self._get_value_from_config(key, phase)
+            if value == planned:
+                # If the value in the config file is the same as the 'planned'
+                # value, we need to check /etc/adjtime.
+                value = self._get_value_from_config('adjtime', phase)
+        elif key == 'name':
+            value = self._get_value_from_config(key, phase)
+            if value == planned:
+                # If the planned value is the same as the one in the config file
+                # we need to check if /etc/localtime is also set to the 'planned' zone.
+                if os.path.islink('/etc/localtime'):
+                    # If /etc/localtime is a symlink and is not set to the TZ we 'planned'
+                    # to set, we need to return the TZ which the symlink points to.
+                    if os.path.exists('/etc/localtime'):
+                        # We use readlink() because on some distros zone files are symlinks
+                        # to other zone files, so it's hard to get which TZ is actually set
+                        # if we follow the symlink.
+                        path = os.readlink('/etc/localtime')
+                        linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE)
+                        if linktz:
+                            valuelink = linktz.group(1)
+                            if valuelink != planned:
+                                value = valuelink
+                        else:
+                            # Set current TZ to 'n/a' if the symlink points to a path
+                            # which isn't a zone file.
+ value = 'n/a' + else: + # Set current TZ to 'n/a' if the symlink to the zone file is broken. + value = 'n/a' + else: + # If /etc/localtime is not a symlink best we can do is compare it with + # the 'planned' zone info file and return 'n/a' if they are different. + try: + if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned): + return 'n/a' + except Exception: + return 'n/a' + else: + self.abort('unknown parameter "%s"' % key) + return value + + def set_timezone(self, value): + self._edit_file(filename=self.conf_files['name'], + regexp=self.regexps['name'], + value=self.tzline_format % value, + key='name') + for cmd in self.update_timezone: + self.execute(cmd) + + def set_hwclock(self, value): + if value == 'local': + option = '--localtime' + utc = 'no' + else: + option = '--utc' + utc = 'yes' + if self.conf_files['hwclock'] is not None: + self._edit_file(filename=self.conf_files['hwclock'], + regexp=self.regexps['hwclock'], + value='UTC=%s\n' % utc, + key='hwclock') + self.execute(self.update_hwclock, '--systohc', option, log=True) + + def set(self, key, value): + if key == 'name': + self.set_timezone(value) + elif key == 'hwclock': + self.set_hwclock(value) + else: + self.abort('unknown parameter "%s"' % key) + + +class SmartOSTimezone(Timezone): + """This is a Timezone manipulation class for SmartOS instances. + + It uses the C(sm-set-timezone) utility to set the timezone, and + inspects C(/etc/default/init) to determine the current timezone. + + NB: A zone needs to be rebooted in order for the change to be + activated. + """ + + def __init__(self, module): + super(SmartOSTimezone, self).__init__(module) + self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False) + if not self.settimezone: + module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.') + + def get(self, key, phase): + """Lookup the current timezone name in `/etc/default/init`. If anything else + is requested, or if the TZ field is not set we fail. + """ + if key == 'name': + try: + f = open('/etc/default/init', 'r') + for line in f: + m = re.match('^TZ=(.*)$', line.strip()) + if m: + return m.groups()[0] + except Exception: + self.module.fail_json(msg='Failed to read /etc/default/init') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + """Set the requested timezone through sm-set-timezone, an invalid timezone name + will be rejected and we have no further input validation to perform. + """ + if key == 'name': + cmd = 'sm-set-timezone %s' % value + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg=stderr) + + # sm-set-timezone knows no state and will always set the timezone. + # XXX: https://github.com/joyent/smtools/pull/2 + m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1]) + if not (m and m.groups()[-1] == value): + self.module.fail_json(msg='Failed to set timezone') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +class DarwinTimezone(Timezone): + """This is the timezone implementation for Darwin which, unlike other *BSD + implementations, uses the `systemsetup` command on Darwin to check/set + the timezone. 
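+
+    For illustration, the name regexp below is written against
+    `systemsetup -gettimezone` output of roughly this shape:
+
+        Time Zone: Asia/Tokyo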
+    """
+
+    regexps = dict(
+        name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE)
+    )
+
+    def __init__(self, module):
+        super(DarwinTimezone, self).__init__(module)
+        self.systemsetup = module.get_bin_path('systemsetup', required=True)
+        self.status = dict()
+        # Validate given timezone
+        if 'name' in self.value:
+            self._verify_timezone()
+
+    def _get_current_timezone(self, phase):
+        """Look up the current timezone via `systemsetup -gettimezone`."""
+        if phase not in self.status:
+            self.status[phase] = self.execute(self.systemsetup, '-gettimezone')
+        return self.status[phase]
+
+    def _verify_timezone(self):
+        tz = self.value['name']['planned']
+        # Look up the list of supported timezones via `systemsetup -listtimezones`.
+        # Note: Skip the first line that contains the label 'Time Zones:'
+        out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:]
+        tz_list = list(map(lambda x: x.strip(), out))
+        if tz not in tz_list:
+            self.abort('given timezone "%s" is not available' % tz)
+        return tz
+
+    def get(self, key, phase):
+        if key == 'name':
+            status = self._get_current_timezone(phase)
+            value = self.regexps[key].search(status).group(1)
+            return value
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+    def set(self, key, value):
+        if key == 'name':
+            self.execute(self.systemsetup, '-settimezone', value, log=True)
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class BSDTimezone(Timezone):
+    """This is the timezone implementation for *BSD which works simply by
+    updating the `/etc/localtime` symlink to point to a valid timezone name under
+    `/usr/share/zoneinfo`.
+    """
+
+    def __init__(self, module):
+        super(BSDTimezone, self).__init__(module)
+
+    def __get_timezone(self):
+        zoneinfo_dir = '/usr/share/zoneinfo/'
+        localtime_file = '/etc/localtime'
+
+        # Strategy 1:
+        #   If /etc/localtime does not exist, assume the timezone is UTC.
+        if not os.path.exists(localtime_file):
+            self.module.warn('Could not read /etc/localtime. Assuming UTC.')
+            return 'UTC'
+
+        # Strategy 2:
+        #   Follow symlink of /etc/localtime
+        zoneinfo_file = localtime_file
+        while not zoneinfo_file.startswith(zoneinfo_dir):
+            try:
+                zoneinfo_file = os.readlink(zoneinfo_file)
+            except OSError:
+                # OSError means "end of symlink chain" or broken link.
+                break
+        else:
+            return zoneinfo_file.replace(zoneinfo_dir, '')
+
+        # Strategy 3:
+        #   (If /etc/localtime is not symlinked)
+        #   Check all files in /usr/share/zoneinfo and return first non-link match.
+        for dname, _, fnames in sorted(os.walk(zoneinfo_dir)):
+            for fname in sorted(fnames):
+                zoneinfo_file = os.path.join(dname, fname)
+                if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file):
+                    return zoneinfo_file.replace(zoneinfo_dir, '')
+
+        # Strategy 4:
+        #   As a fall-back, return 'UTC' as default assumption.
+        self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.')
+        return 'UTC'
+
+    def get(self, key, phase):
+        """Look up the current timezone by resolving `/etc/localtime`."""
+        if key == 'name':
+            return self.__get_timezone()
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+    def set(self, key, value):
+        if key == 'name':
+            # First determine if the requested timezone is valid by looking in
+            # the zoneinfo directory.
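+            # Note that os.path.isfile() follows symlinks, so zone names that
+            # are themselves symlinks to other zone files also pass this check.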
+            zonefile = '/usr/share/zoneinfo/' + value
+            try:
+                if not os.path.isfile(zonefile):
+                    self.module.fail_json(msg='%s is not a recognized timezone' % value)
+            except Exception:
+                self.module.fail_json(msg='Failed to stat %s' % zonefile)
+
+            # Now (somewhat) atomically update the symlink by creating a new
+            # symlink and moving it into place. Otherwise we would have to remove
+            # the original symlink and create the new one, which would create
+            # a race condition if another process tried to read /etc/localtime
+            # between removal and creation.
+            suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
+            new_localtime = '/etc/localtime.' + suffix
+
+            try:
+                os.symlink(zonefile, new_localtime)
+                os.rename(new_localtime, '/etc/localtime')
+            except Exception:
+                os.remove(new_localtime)
+                self.module.fail_json(msg='Could not update /etc/localtime')
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class AIXTimezone(Timezone):
+    """This is a Timezone manipulation class for AIX instances.
+
+    It uses the C(chtz) utility to set the timezone, and
+    inspects C(/etc/environment) to determine the current timezone.
+
+    While AIX time zones can be set using two formats (POSIX and
+    Olson), the preferred method is Olson.
+    See the following article for more information:
+    https://developer.ibm.com/articles/au-aix-posix/
+
+    NB: AIX needs to be rebooted in order for the change to be
+    activated.
+    """
+
+    def __init__(self, module):
+        super(AIXTimezone, self).__init__(module)
+        self.settimezone = self.module.get_bin_path('chtz', required=True)
+
+    def __get_timezone(self):
+        """ Return the current value of TZ= in /etc/environment """
+        try:
+            f = open('/etc/environment', 'r')
+            etcenvironment = f.read()
+            f.close()
+        except Exception:
+            self.module.fail_json(msg='Issue reading contents of /etc/environment')
+
+        match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE)
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    def get(self, key, phase):
+        """Look up the current timezone name in `/etc/environment`. If anything else
+        is requested, or if the TZ field is not set, we fail.
+        """
+        if key == 'name':
+            return self.__get_timezone()
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+    def set(self, key, value):
+        """Set the requested timezone through chtz; an invalid timezone name
+        will be rejected, and we have no further input validation to perform.
+        """
+        if key == 'name':
+            # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values.
+            # It will only return non-zero if the chtz command itself fails; it does not check for
+            # valid timezones. We need to perform a basic check to confirm that the timezone
+            # definition exists in /usr/share/lib/zoneinfo.
+            # This does mean that we can only support Olson for now. The below commented out regex
+            # detects Olson date formats, so in the future we could detect Posix or Olson and
+            # act accordingly.
+
+            # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE)
+            # if not regex_olson.match(value):
+            #     msg = 'Supplied timezone (%s) does not appear to be a valid Olson string' % value
+            #     self.module.fail_json(msg=msg)
+
+            # First determine if the requested timezone is valid by looking in the zoneinfo
+            # directory.
+            zonefile = '/usr/share/lib/zoneinfo/' + value
+            try:
+                if not os.path.isfile(zonefile):
+                    self.module.fail_json(msg='%s is not a recognized timezone.'
% value)
+            except Exception:
+                self.module.fail_json(msg='Failed to check %s.' % zonefile)
+
+            # Now set the TZ using chtz
+            cmd = 'chtz %s' % value
+            (rc, stdout, stderr) = self.module.run_command(cmd)
+
+            if rc != 0:
+                self.module.fail_json(msg=stderr)
+
+            # The best condition check we can do is to check the value of TZ after making the
+            # change.
+            TZ = self.__get_timezone()
+            if TZ != value:
+                msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
+                self.module.fail_json(msg=msg)
+
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+def main():
+    # Construct 'module' and 'tz'
+    module = AnsibleModule(
+        argument_spec=dict(
+            hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']),
+            name=dict(type='str'),
+        ),
+        required_one_of=[
+            ['hwclock', 'name']
+        ],
+        supports_check_mode=True,
+    )
+    tz = Timezone(module)
+
+    # Check the current state
+    tz.check(phase='before')
+    if module.check_mode:
+        diff = tz.diff('before', 'planned')
+        # In check mode, 'planned' state is treated as 'after' state
+        diff['after'] = diff.pop('planned')
+    else:
+        # Make change
+        tz.change()
+        # Check the current state
+        tz.check(phase='after')
+        # Examine if the current state matches the planned state
+        (after, planned) = tz.diff('after', 'planned').values()
+        if after != planned:
+            tz.abort('still not desired state, though changes have been made - '
+                     'planned: %s, after: %s' % (str(planned), str(after)))
+        diff = tz.diff('before', 'after')
+
+    changed = (diff['before'] != diff['after'])
+    if len(tz.msg) > 0:
+        module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+    else:
+        module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/x509_crl.py b/test/support/integration/plugins/modules/x509_crl.py
new file mode 100644
index 00000000..ef601eda
--- /dev/null
+++ b/test/support/integration/plugins/modules/x509_crl.py
@@ -0,0 +1,783 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: x509_crl
+version_added: "2.10"
+short_description: Generate Certificate Revocation Lists (CRLs)
+description:
+    - This module allows one to (re)generate or update Certificate Revocation Lists (CRLs).
+    - Certificates on the revocation list can be either specified via serial number and (optionally) their issuer,
+      or as a path to a certificate file in PEM format.
+requirements:
+    - cryptography >= 1.2
+author:
+    - Felix Fontein (@felixfontein)
+options:
+    state:
+        description:
+            - Whether the CRL file should exist or not, taking action if the state is different from what is stated.
+        type: str
+        default: present
+        choices: [ absent, present ]
+
+    mode:
+        description:
+            - Defines how to process entries of existing CRLs.
+            - If set to C(generate), makes sure that the CRL has the exact set of revoked certificates
+              as specified in I(revoked_certificates).
+            - If set to C(update), makes sure that the CRL contains the revoked certificates from
+              I(revoked_certificates), but can also contain other revoked certificates.
If the CRL file
+              already exists, all entries from the existing CRL will also be included in the new CRL.
+              When using C(update), you might be interested in setting I(ignore_timestamps) to C(yes).
+        type: str
+        default: generate
+        choices: [ generate, update ]
+
+    force:
+        description:
+            - Should the CRL be forced to be regenerated.
+        type: bool
+        default: no
+
+    backup:
+        description:
+            - Create a backup file including a timestamp so you can get the original
+              CRL back if you overwrote it with a new one by accident.
+        type: bool
+        default: no
+
+    path:
+        description:
+            - Remote absolute path where the generated CRL file should be created or is already located.
+        type: path
+        required: yes
+
+    privatekey_path:
+        description:
+            - Path to the CA's private key to use when signing the CRL.
+            - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
+        type: path
+
+    privatekey_content:
+        description:
+            - The content of the CA's private key to use when signing the CRL.
+            - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
+        type: str
+
+    privatekey_passphrase:
+        description:
+            - The passphrase for the I(privatekey_path).
+            - This is required if the private key is password protected.
+        type: str
+
+    issuer:
+        description:
+            - Key/value pairs that will be present in the issuer name field of the CRL.
+            - If you need to specify more than one value with the same key, use a list as value.
+            - Required if I(state) is C(present).
+        type: dict
+
+    last_update:
+        description:
+            - The point in time from which this CRL can be trusted.
+            - Time can be specified either as relative time or as absolute timestamp.
+            - Time will always be interpreted as UTC.
+            - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+              + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+            - Note that if using relative time this module is NOT idempotent, except when
+              I(ignore_timestamps) is set to C(yes).
+        type: str
+        default: "+0s"
+
+    next_update:
+        description:
+            - "The absolute latest point in time by which this I(issuer) is expected to have issued
+              another CRL. Many clients will treat a CRL as expired once I(next_update) occurs."
+            - Time can be specified either as relative time or as absolute timestamp.
+            - Time will always be interpreted as UTC.
+            - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+              + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+            - Note that if using relative time this module is NOT idempotent, except when
+              I(ignore_timestamps) is set to C(yes).
+            - Required if I(state) is C(present).
+        type: str
+
+    digest:
+        description:
+            - Digest algorithm to be used when signing the CRL.
+        type: str
+        default: sha256
+
+    revoked_certificates:
+        description:
+            - List of certificates to be revoked.
+            - Required if I(state) is C(present).
+        type: list
+        elements: dict
+        suboptions:
+            path:
+                description:
+                    - Path to a certificate in PEM format.
+                    - The serial number and issuer will be extracted from the certificate.
+                    - Mutually exclusive with I(content) and I(serial_number). One of these three options
+                      must be specified.
+                type: path
+            content:
+                description:
+                    - Content of a certificate in PEM format.
+                    - The serial number and issuer will be extracted from the certificate.
+                    - Mutually exclusive with I(path) and I(serial_number). One of these three options
+                      must be specified.
+                type: str
+            serial_number:
+                description:
+                    - Serial number of the certificate.
+                    - Mutually exclusive with I(path) and I(content). One of these three options must
+                      be specified.
+                type: int
+            revocation_date:
+                description:
+                    - The point in time the certificate was revoked.
+                    - Time can be specified either as relative time or as absolute timestamp.
+                    - Time will always be interpreted as UTC.
+                    - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+                      + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+                    - Note that if using relative time this module is NOT idempotent, except when
+                      I(ignore_timestamps) is set to C(yes).
+                type: str
+                default: "+0s"
+            issuer:
+                description:
+                    - The certificate's issuer.
+                    - "Example: C(DNS:ca.example.org)"
+                type: list
+                elements: str
+            issuer_critical:
+                description:
+                    - Whether the certificate issuer extension should be critical.
+                type: bool
+                default: no
+            reason:
+                description:
+                    - The value for the revocation reason extension.
+                type: str
+                choices:
+                    - unspecified
+                    - key_compromise
+                    - ca_compromise
+                    - affiliation_changed
+                    - superseded
+                    - cessation_of_operation
+                    - certificate_hold
+                    - privilege_withdrawn
+                    - aa_compromise
+                    - remove_from_crl
+            reason_critical:
+                description:
+                    - Whether the revocation reason extension should be critical.
+                type: bool
+                default: no
+            invalidity_date:
+                description:
+                    - The point in time it was known/suspected that the private key was compromised
+                      or that the certificate otherwise became invalid.
+                    - Time can be specified either as relative time or as absolute timestamp.
+                    - Time will always be interpreted as UTC.
+                    - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+                      + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+                    - Note that if using relative time this module is NOT idempotent. This will NOT
+                      change when I(ignore_timestamps) is set to C(yes).
+                type: str
+            invalidity_date_critical:
+                description:
+                    - Whether the invalidity date extension should be critical.
+                type: bool
+                default: no
+
+    ignore_timestamps:
+        description:
+            - Whether the timestamps I(last_update), I(next_update) and I(revocation_date) (in
+              I(revoked_certificates)) should be ignored for idempotency checks. The timestamp
+              I(invalidity_date) in I(revoked_certificates) will never be ignored.
+            - Use this in combination with relative timestamps for these values to get idempotency.
+        type: bool
+        default: no
+
+    return_content:
+        description:
+            - If set to C(yes), will return the (current or generated) CRL's content as I(crl).
+        type: bool
+        default: no
+
+extends_documentation_fragment:
+    - files
+
+notes:
+    - All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
+    - Dates specified should be UTC. Minutes and seconds are mandatory.
+'''
+
+EXAMPLES = r'''
+- name: Generate a CRL
+  x509_crl:
+    path: /etc/ssl/my-ca.crl
+    privatekey_path: /etc/ssl/private/my-ca.pem
+    issuer:
+      CN: My CA
+    last_update: "+0s"
+    next_update: "+7d"
+    revoked_certificates:
+      - serial_number: 1234
+        revocation_date: 20190331202428Z
+        issuer:
+          CN: My CA
+      - serial_number: 2345
+        revocation_date: 20191013152910Z
+        reason: affiliation_changed
+        invalidity_date: 20191001000000Z
+      - path: /etc/ssl/crt/revoked-cert.pem
+        revocation_date: 20191010010203Z
+'''
+
+RETURN = r'''
+filename:
+    description: Path to the generated CRL
+    returned: changed or success
+    type: str
+    sample: /path/to/my-ca.crl
+backup_file:
+    description: Name of backup file created.
+    returned: changed and if I(backup) is C(yes)
+    type: str
+    sample: /path/to/my-ca.crl.2019-03-09@11:22~
+privatekey:
+    description: Path to the private CA key
+    returned: changed or success
+    type: str
+    sample: /path/to/my-ca.pem
+issuer:
+    description:
+        - The CRL's issuer.
+        - Note that for repeated values, only the last one will be returned.
+    returned: success
+    type: dict
+    sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
+issuer_ordered:
+    description: The CRL's issuer as an ordered list of tuples.
+    returned: success
+    type: list
+    elements: list
+    sample: '[["organizationName", "Ansible"], ["commonName", "ca.example.com"]]'
+last_update:
+    description: The point in time from which this CRL can be trusted as ASN.1 TIME.
+    returned: success
+    type: str
+    sample: 20190413202428Z
+next_update:
+    description: The point in time from which a new CRL will be issued and the client has to check for it as ASN.1 TIME.
+    returned: success
+    type: str
+    sample: 20190413202428Z
+digest:
+    description: The signature algorithm used to sign the CRL.
+    returned: success
+    type: str
+    sample: sha256WithRSAEncryption
+revoked_certificates:
+    description: List of certificates to be revoked.
+    returned: success
+    type: list
+    elements: dict
+    contains:
+        serial_number:
+            description: Serial number of the certificate.
+            type: int
+            sample: 1234
+        revocation_date:
+            description: The point in time the certificate was revoked as ASN.1 TIME.
+            type: str
+            sample: 20190413202428Z
+        issuer:
+            description: The certificate's issuer.
+            type: list
+            elements: str
+            sample: '["DNS:ca.example.org"]'
+        issuer_critical:
+            description: Whether the certificate issuer extension is critical.
+            type: bool
+            sample: no
+        reason:
+            description:
+                - The value for the revocation reason extension.
+                - One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
+                  C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
+                  C(remove_from_crl).
+            type: str
+            sample: key_compromise
+        reason_critical:
+            description: Whether the revocation reason extension is critical.
+            type: bool
+            sample: no
+        invalidity_date:
+            description: |
+                The point in time it was known/suspected that the private key was compromised
+                or that the certificate otherwise became invalid as ASN.1 TIME.
+            type: str
+            sample: 20190413202428Z
+        invalidity_date_critical:
+            description: Whether the invalidity date extension is critical.
+            type: bool
+            sample: no
+crl:
+    description: The (current or generated) CRL's content.
+ returned: if I(state) is C(present) and I(return_content) is C(yes) + type: str +''' + + +import os +import traceback +from distutils.version import LooseVersion + +from ansible.module_utils import crypto as crypto_utils +from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +MINIMAL_CRYPTOGRAPHY_VERSION = '1.2' + +CRYPTOGRAPHY_IMP_ERR = None +try: + import cryptography + from cryptography import x509 + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives.serialization import Encoding + from cryptography.x509 import ( + CertificateRevocationListBuilder, + RevokedCertificateBuilder, + NameAttribute, + Name, + ) + CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__) +except ImportError: + CRYPTOGRAPHY_IMP_ERR = traceback.format_exc() + CRYPTOGRAPHY_FOUND = False +else: + CRYPTOGRAPHY_FOUND = True + + +TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ" + + +class CRLError(crypto_utils.OpenSSLObjectError): + pass + + +class CRL(crypto_utils.OpenSSLObject): + + def __init__(self, module): + super(CRL, self).__init__( + module.params['path'], + module.params['state'], + module.params['force'], + module.check_mode + ) + + self.update = module.params['mode'] == 'update' + self.ignore_timestamps = module.params['ignore_timestamps'] + self.return_content = module.params['return_content'] + self.crl_content = None + + self.privatekey_path = module.params['privatekey_path'] + self.privatekey_content = module.params['privatekey_content'] + if self.privatekey_content is not None: + self.privatekey_content = self.privatekey_content.encode('utf-8') + self.privatekey_passphrase = module.params['privatekey_passphrase'] + + self.issuer = crypto_utils.parse_name_field(module.params['issuer']) + self.issuer = [(entry[0], entry[1]) for entry in self.issuer if entry[1]] + + self.last_update = crypto_utils.get_relative_time_option(module.params['last_update'], 'last_update') + self.next_update = crypto_utils.get_relative_time_option(module.params['next_update'], 'next_update') + + self.digest = crypto_utils.select_message_digest(module.params['digest']) + if self.digest is None: + raise CRLError('The digest "{0}" is not supported'.format(module.params['digest'])) + + self.revoked_certificates = [] + for i, rc in enumerate(module.params['revoked_certificates']): + result = { + 'serial_number': None, + 'revocation_date': None, + 'issuer': None, + 'issuer_critical': False, + 'reason': None, + 'reason_critical': False, + 'invalidity_date': None, + 'invalidity_date_critical': False, + } + path_prefix = 'revoked_certificates[{0}].'.format(i) + if rc['path'] is not None or rc['content'] is not None: + # Load certificate from file or content + try: + if rc['content'] is not None: + rc['content'] = rc['content'].encode('utf-8') + cert = crypto_utils.load_certificate(rc['path'], content=rc['content'], backend='cryptography') + try: + result['serial_number'] = cert.serial_number + except AttributeError: + # The property was called "serial" before cryptography 1.4 + result['serial_number'] = cert.serial + except crypto_utils.OpenSSLObjectError as e: + if rc['content'] is not None: + module.fail_json( + msg='Cannot parse certificate from {0}content: {1}'.format(path_prefix, to_native(e)) + ) + else: + module.fail_json( + msg='Cannot read certificate "{1}" from {0}path: {2}'.format(path_prefix, rc['path'], to_native(e)) + ) + else: + # Specify serial_number (and potentially issuer) directly + 
result['serial_number'] = rc['serial_number'] + # All other options + if rc['issuer']: + result['issuer'] = [crypto_utils.cryptography_get_name(issuer) for issuer in rc['issuer']] + result['issuer_critical'] = rc['issuer_critical'] + result['revocation_date'] = crypto_utils.get_relative_time_option( + rc['revocation_date'], + path_prefix + 'revocation_date' + ) + if rc['reason']: + result['reason'] = crypto_utils.REVOCATION_REASON_MAP[rc['reason']] + result['reason_critical'] = rc['reason_critical'] + if rc['invalidity_date']: + result['invalidity_date'] = crypto_utils.get_relative_time_option( + rc['invalidity_date'], + path_prefix + 'invalidity_date' + ) + result['invalidity_date_critical'] = rc['invalidity_date_critical'] + self.revoked_certificates.append(result) + + self.module = module + + self.backup = module.params['backup'] + self.backup_file = None + + try: + self.privatekey = crypto_utils.load_privatekey( + path=self.privatekey_path, + content=self.privatekey_content, + passphrase=self.privatekey_passphrase, + backend='cryptography' + ) + except crypto_utils.OpenSSLBadPassphraseError as exc: + raise CRLError(exc) + + self.crl = None + try: + with open(self.path, 'rb') as f: + data = f.read() + self.crl = x509.load_pem_x509_crl(data, default_backend()) + if self.return_content: + self.crl_content = data + except Exception as dummy: + self.crl_content = None + + def remove(self): + if self.backup: + self.backup_file = self.module.backup_local(self.path) + super(CRL, self).remove(self.module) + + def _compress_entry(self, entry): + if self.ignore_timestamps: + # Throw out revocation_date + return ( + entry['serial_number'], + tuple(entry['issuer']) if entry['issuer'] is not None else None, + entry['issuer_critical'], + entry['reason'], + entry['reason_critical'], + entry['invalidity_date'], + entry['invalidity_date_critical'], + ) + else: + return ( + entry['serial_number'], + entry['revocation_date'], + tuple(entry['issuer']) if entry['issuer'] is not None else None, + entry['issuer_critical'], + entry['reason'], + entry['reason_critical'], + entry['invalidity_date'], + entry['invalidity_date_critical'], + ) + + def check(self, perms_required=True): + """Ensure the resource is in its desired state.""" + + state_and_perms = super(CRL, self).check(self.module, perms_required) + + if not state_and_perms: + return False + + if self.crl is None: + return False + + if self.last_update != self.crl.last_update and not self.ignore_timestamps: + return False + if self.next_update != self.crl.next_update and not self.ignore_timestamps: + return False + if self.digest.name != self.crl.signature_hash_algorithm.name: + return False + + want_issuer = [(crypto_utils.cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.issuer] + if want_issuer != [(sub.oid, sub.value) for sub in self.crl.issuer]: + return False + + old_entries = [self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(cert)) for cert in self.crl] + new_entries = [self._compress_entry(cert) for cert in self.revoked_certificates] + if self.update: + # We don't simply use a set so that duplicate entries are treated correctly + for entry in new_entries: + try: + old_entries.remove(entry) + except ValueError: + return False + else: + if old_entries != new_entries: + return False + + return True + + def _generate_crl(self): + backend = default_backend() + crl = CertificateRevocationListBuilder() + + try: + crl = crl.issuer_name(Name([ + NameAttribute(crypto_utils.cryptography_name_to_oid(entry[0]), 
to_text(entry[1]))
+                for entry in self.issuer
+            ]))
+        except ValueError as e:
+            raise CRLError(e)
+
+        crl = crl.last_update(self.last_update)
+        crl = crl.next_update(self.next_update)
+
+        if self.update and self.crl:
+            new_entries = set([self._compress_entry(entry) for entry in self.revoked_certificates])
+            for entry in self.crl:
+                decoded_entry = self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(entry))
+                if decoded_entry not in new_entries:
+                    crl = crl.add_revoked_certificate(entry)
+
+        for entry in self.revoked_certificates:
+            revoked_cert = RevokedCertificateBuilder()
+            revoked_cert = revoked_cert.serial_number(entry['serial_number'])
+            revoked_cert = revoked_cert.revocation_date(entry['revocation_date'])
+            if entry['issuer'] is not None:
+                revoked_cert = revoked_cert.add_extension(
+                    x509.CertificateIssuer([
+                        crypto_utils.cryptography_get_name(name) for name in entry['issuer']
+                    ]),
+                    entry['issuer_critical']
+                )
+            if entry['reason'] is not None:
+                revoked_cert = revoked_cert.add_extension(
+                    x509.CRLReason(entry['reason']),
+                    entry['reason_critical']
+                )
+            if entry['invalidity_date'] is not None:
+                revoked_cert = revoked_cert.add_extension(
+                    x509.InvalidityDate(entry['invalidity_date']),
+                    entry['invalidity_date_critical']
+                )
+            crl = crl.add_revoked_certificate(revoked_cert.build(backend))
+
+        self.crl = crl.sign(self.privatekey, self.digest, backend=backend)
+        return self.crl.public_bytes(Encoding.PEM)
+
+    def generate(self):
+        if not self.check(perms_required=False) or self.force:
+            result = self._generate_crl()
+
+            if self.return_content:
+                self.crl_content = result
+
+            if self.backup:
+                self.backup_file = self.module.backup_local(self.path)
+            crypto_utils.write_file(self.module, result)
+            self.changed = True
+
+        file_args = self.module.load_file_common_arguments(self.module.params)
+        if self.module.set_fs_attributes_if_different(file_args, False):
+            self.changed = True
+
+    def _dump_revoked(self, entry):
+        return {
+            'serial_number': entry['serial_number'],
+            'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
+            'issuer':
+                [crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
+                if entry['issuer'] is not None else None,
+            'issuer_critical': entry['issuer_critical'],
+            'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
+            'reason_critical': entry['reason_critical'],
+            'invalidity_date':
+                entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
+                if entry['invalidity_date'] is not None else None,
+            'invalidity_date_critical': entry['invalidity_date_critical'],
+        }
+
+    def dump(self, check_mode=False):
+        result = {
+            'changed': self.changed,
+            'filename': self.path,
+            'privatekey': self.privatekey_path,
+            'last_update': None,
+            'next_update': None,
+            'digest': None,
+            'issuer_ordered': None,
+            'issuer': None,
+            'revoked_certificates': [],
+        }
+        if self.backup_file:
+            result['backup_file'] = self.backup_file
+
+        if check_mode:
+            result['last_update'] = self.last_update.strftime(TIMESTAMP_FORMAT)
+            result['next_update'] = self.next_update.strftime(TIMESTAMP_FORMAT)
+            # result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
+            result['digest'] = self.module.params['digest']
+            result['issuer_ordered'] = self.issuer
+            result['issuer'] = {}
+            for k, v in self.issuer:
+                result['issuer'][k] = v
+            result['revoked_certificates'] = []
+            for entry in self.revoked_certificates:
+
result['revoked_certificates'].append(self._dump_revoked(entry)) + elif self.crl: + result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT) + result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT) + try: + result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid) + except AttributeError: + # Older cryptography versions don't have signature_algorithm_oid yet + dotted = crypto_utils._obj2txt( + self.crl._backend._lib, + self.crl._backend._ffi, + self.crl._x509_crl.sig_alg.algorithm + ) + oid = x509.oid.ObjectIdentifier(dotted) + result['digest'] = crypto_utils.cryptography_oid_to_name(oid) + issuer = [] + for attribute in self.crl.issuer: + issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value]) + result['issuer_ordered'] = issuer + result['issuer'] = {} + for k, v in issuer: + result['issuer'][k] = v + result['revoked_certificates'] = [] + for cert in self.crl: + entry = crypto_utils.cryptography_decode_revoked_certificate(cert) + result['revoked_certificates'].append(self._dump_revoked(entry)) + + if self.return_content: + result['crl'] = self.crl_content + + return result + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + mode=dict(type='str', default='generate', choices=['generate', 'update']), + force=dict(type='bool', default=False), + backup=dict(type='bool', default=False), + path=dict(type='path', required=True), + privatekey_path=dict(type='path'), + privatekey_content=dict(type='str'), + privatekey_passphrase=dict(type='str', no_log=True), + issuer=dict(type='dict'), + last_update=dict(type='str', default='+0s'), + next_update=dict(type='str'), + digest=dict(type='str', default='sha256'), + ignore_timestamps=dict(type='bool', default=False), + return_content=dict(type='bool', default=False), + revoked_certificates=dict( + type='list', + elements='dict', + options=dict( + path=dict(type='path'), + content=dict(type='str'), + serial_number=dict(type='int'), + revocation_date=dict(type='str', default='+0s'), + issuer=dict(type='list', elements='str'), + issuer_critical=dict(type='bool', default=False), + reason=dict( + type='str', + choices=[ + 'unspecified', 'key_compromise', 'ca_compromise', 'affiliation_changed', + 'superseded', 'cessation_of_operation', 'certificate_hold', + 'privilege_withdrawn', 'aa_compromise', 'remove_from_crl' + ] + ), + reason_critical=dict(type='bool', default=False), + invalidity_date=dict(type='str'), + invalidity_date_critical=dict(type='bool', default=False), + ), + required_one_of=[['path', 'content', 'serial_number']], + mutually_exclusive=[['path', 'content', 'serial_number']], + ), + ), + required_if=[ + ('state', 'present', ['privatekey_path', 'privatekey_content'], True), + ('state', 'present', ['issuer', 'next_update', 'revoked_certificates'], False), + ], + mutually_exclusive=( + ['privatekey_path', 'privatekey_content'], + ), + supports_check_mode=True, + add_file_common_args=True, + ) + + if not CRYPTOGRAPHY_FOUND: + module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)), + exception=CRYPTOGRAPHY_IMP_ERR) + + try: + crl = CRL(module) + + if module.params['state'] == 'present': + if module.check_mode: + result = crl.dump(check_mode=True) + result['changed'] = module.params['force'] or not crl.check() + module.exit_json(**result) + + crl.generate() + else: + if module.check_mode: + result = 
crl.dump(check_mode=True)
+                result['changed'] = os.path.exists(module.params['path'])
+                module.exit_json(**result)
+
+            crl.remove()
+
+        result = crl.dump()
+        module.exit_json(**result)
+    except crypto_utils.OpenSSLObjectError as exc:
+        module.fail_json(msg=to_native(exc))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/test/support/integration/plugins/modules/x509_crl_info.py b/test/support/integration/plugins/modules/x509_crl_info.py
new file mode 100644
index 00000000..b61db26f
--- /dev/null
+++ b/test/support/integration/plugins/modules/x509_crl_info.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: x509_crl_info
+version_added: "2.10"
+short_description: Retrieve information on Certificate Revocation Lists (CRLs)
+description:
+    - This module allows one to retrieve information on Certificate Revocation Lists (CRLs).
+requirements:
+    - cryptography >= 1.2
+author:
+    - Felix Fontein (@felixfontein)
+options:
+    path:
+        description:
+            - Remote absolute path where the CRL file is located.
+            - Either I(path) or I(content) must be specified, but not both.
+        type: path
+    content:
+        description:
+            - Content of the X.509 CRL in PEM format.
+            - Either I(path) or I(content) must be specified, but not both.
+        type: str
+
+notes:
+    - All timestamp values are provided in ASN.1 TIME format, i.e. following the C(YYYYMMDDHHMMSSZ) pattern.
+      They are all in UTC.
+seealso:
+    - module: x509_crl
+'''
+
+EXAMPLES = r'''
+- name: Get information on CRL
+  x509_crl_info:
+    path: /etc/ssl/my-ca.crl
+  register: result
+
+- debug:
+    msg: "{{ result }}"
+'''
+
+RETURN = r'''
+issuer:
+    description:
+        - The CRL's issuer.
+        - Note that for repeated values, only the last one will be returned.
+    returned: success
+    type: dict
+    sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
+issuer_ordered:
+    description: The CRL's issuer as an ordered list of tuples.
+    returned: success
+    type: list
+    elements: list
+    sample: '[["organizationName", "Ansible"], ["commonName", "ca.example.com"]]'
+last_update:
+    description: The point in time from which this CRL can be trusted as ASN.1 TIME.
+    returned: success
+    type: str
+    sample: 20190413202428Z
+next_update:
+    description: The point in time from which a new CRL will be issued and the client has to check for it as ASN.1 TIME.
+    returned: success
+    type: str
+    sample: 20190413202428Z
+digest:
+    description: The signature algorithm used to sign the CRL.
+    returned: success
+    type: str
+    sample: sha256WithRSAEncryption
+revoked_certificates:
+    description: List of certificates to be revoked.
+    returned: success
+    type: list
+    elements: dict
+    contains:
+        serial_number:
+            description: Serial number of the certificate.
+            type: int
+            sample: 1234
+        revocation_date:
+            description: The point in time the certificate was revoked as ASN.1 TIME.
+            type: str
+            sample: 20190413202428Z
+        issuer:
+            description: The certificate's issuer.
+            type: list
+            elements: str
+            sample: '["DNS:ca.example.org"]'
+        issuer_critical:
+            description: Whether the certificate issuer extension is critical.
+ type: bool + sample: no + reason: + description: + - The value for the revocation reason extension. + - One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded), + C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and + C(remove_from_crl). + type: str + sample: key_compromise + reason_critical: + description: Whether the revocation reason extension is critical. + type: bool + sample: no + invalidity_date: + description: | + The point in time it was known/suspected that the private key was compromised + or that the certificate otherwise became invalid as ASN.1 TIME. + type: str + sample: 20190413202428Z + invalidity_date_critical: + description: Whether the invalidity date extension is critical. + type: bool + sample: no +''' + + +import traceback +from distutils.version import LooseVersion + +from ansible.module_utils import crypto as crypto_utils +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +MINIMAL_CRYPTOGRAPHY_VERSION = '1.2' + +CRYPTOGRAPHY_IMP_ERR = None +try: + import cryptography + from cryptography import x509 + from cryptography.hazmat.backends import default_backend + CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__) +except ImportError: + CRYPTOGRAPHY_IMP_ERR = traceback.format_exc() + CRYPTOGRAPHY_FOUND = False +else: + CRYPTOGRAPHY_FOUND = True + + +TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ" + + +class CRLError(crypto_utils.OpenSSLObjectError): + pass + + +class CRLInfo(crypto_utils.OpenSSLObject): + """The main module implementation.""" + + def __init__(self, module): + super(CRLInfo, self).__init__( + module.params['path'] or '', + 'present', + False, + module.check_mode + ) + + self.content = module.params['content'] + + self.module = module + + self.crl = None + if self.content is None: + try: + with open(self.path, 'rb') as f: + data = f.read() + except Exception as e: + self.module.fail_json(msg='Error while reading CRL file from disk: {0}'.format(e)) + else: + data = self.content.encode('utf-8') + + try: + self.crl = x509.load_pem_x509_crl(data, default_backend()) + except Exception as e: + self.module.fail_json(msg='Error while decoding CRL: {0}'.format(e)) + + def _dump_revoked(self, entry): + return { + 'serial_number': entry['serial_number'], + 'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT), + 'issuer': + [crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']] + if entry['issuer'] is not None else None, + 'issuer_critical': entry['issuer_critical'], + 'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None, + 'reason_critical': entry['reason_critical'], + 'invalidity_date': + entry['invalidity_date'].strftime(TIMESTAMP_FORMAT) + if entry['invalidity_date'] is not None else None, + 'invalidity_date_critical': entry['invalidity_date_critical'], + } + + def get_info(self): + result = { + 'changed': False, + 'last_update': None, + 'next_update': None, + 'digest': None, + 'issuer_ordered': None, + 'issuer': None, + 'revoked_certificates': [], + } + + result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT) + result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT) + try: + result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid) + except AttributeError: + # Older cryptography versions don't have signature_algorithm_oid yet + dotted = 
crypto_utils._obj2txt( + self.crl._backend._lib, + self.crl._backend._ffi, + self.crl._x509_crl.sig_alg.algorithm + ) + oid = x509.oid.ObjectIdentifier(dotted) + result['digest'] = crypto_utils.cryptography_oid_to_name(oid) + issuer = [] + for attribute in self.crl.issuer: + issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value]) + result['issuer_ordered'] = issuer + result['issuer'] = {} + for k, v in issuer: + result['issuer'][k] = v + result['revoked_certificates'] = [] + for cert in self.crl: + entry = crypto_utils.cryptography_decode_revoked_certificate(cert) + result['revoked_certificates'].append(self._dump_revoked(entry)) + + return result + + def generate(self): + # Empty method because crypto_utils.OpenSSLObject wants this + pass + + def dump(self): + # Empty method because crypto_utils.OpenSSLObject wants this + pass + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path'), + content=dict(type='str'), + ), + required_one_of=( + ['path', 'content'], + ), + mutually_exclusive=( + ['path', 'content'], + ), + supports_check_mode=True, + ) + + if not CRYPTOGRAPHY_FOUND: + module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)), + exception=CRYPTOGRAPHY_IMP_ERR) + + try: + crl = CRLInfo(module) + result = crl.get_info() + module.exit_json(**result) + except crypto_utils.OpenSSLObjectError as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/test/support/integration/plugins/modules/xml.py b/test/support/integration/plugins/modules/xml.py new file mode 100644 index 00000000..b5b35a38 --- /dev/null +++ b/test/support/integration/plugins/modules/xml.py @@ -0,0 +1,966 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Red Hat, Inc. +# Copyright: (c) 2014, Tim Bielawa <tbielawa@redhat.com> +# Copyright: (c) 2014, Magnus Hedemark <mhedemar@redhat.com> +# Copyright: (c) 2017, Dag Wieers <dag@wieers.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: xml +short_description: Manage bits and pieces of XML files or strings +description: +- A CRUD-like interface to managing bits of XML files. +version_added: '2.4' +options: + path: + description: + - Path to the file to operate on. + - This file must exist ahead of time. + - This parameter is required, unless C(xmlstring) is given. + type: path + required: yes + aliases: [ dest, file ] + xmlstring: + description: + - A string containing XML on which to operate. + - This parameter is required, unless C(path) is given. + type: str + required: yes + xpath: + description: + - A valid XPath expression describing the item(s) you want to manipulate. + - Operates on the document root, C(/), by default. + type: str + namespaces: + description: + - The namespace C(prefix:uri) mapping for the XPath expression. + - Needs to be a C(dict), not a C(list) of items. + type: dict + state: + description: + - Set or remove an xpath selection (node(s), attribute(s)). + type: str + choices: [ absent, present ] + default: present + aliases: [ ensure ] + attribute: + description: + - The attribute to select when using parameter C(value). + - This is a string, not prepended with C(@). 
+ type: raw + value: + description: + - Desired state of the selected attribute. + - Either a string, or to unset a value, the Python C(None) keyword (YAML Equivalent, C(null)). + - Elements default to no value (but present). + - Attributes default to an empty string. + type: raw + add_children: + description: + - Add additional child-element(s) to a selected element for a given C(xpath). + - Child elements must be given in a list and each item may be either a string + (eg. C(children=ansible) to add an empty C(<ansible/>) child element), + or a hash where the key is an element name and the value is the element value. + - This parameter requires C(xpath) to be set. + type: list + set_children: + description: + - Set the child-element(s) of a selected element for a given C(xpath). + - Removes any existing children. + - Child elements must be specified as in C(add_children). + - This parameter requires C(xpath) to be set. + type: list + count: + description: + - Search for a given C(xpath) and provide the count of any matches. + - This parameter requires C(xpath) to be set. + type: bool + default: no + print_match: + description: + - Search for a given C(xpath) and print out any matches. + - This parameter requires C(xpath) to be set. + type: bool + default: no + pretty_print: + description: + - Pretty print XML output. + type: bool + default: no + content: + description: + - Search for a given C(xpath) and get content. + - This parameter requires C(xpath) to be set. + type: str + choices: [ attribute, text ] + input_type: + description: + - Type of input for C(add_children) and C(set_children). + type: str + choices: [ xml, yaml ] + default: yaml + backup: + description: + - Create a backup file including the timestamp information so you can get + the original file back if you somehow clobbered it incorrectly. + type: bool + default: no + strip_cdata_tags: + description: + - Remove CDATA tags surrounding text values. + - Note that this might break your XML file if text values contain characters that could be interpreted as XML. + type: bool + default: no + version_added: '2.7' + insertbefore: + description: + - Add additional child-element(s) before the first selected element for a given C(xpath). + - Child elements must be given in a list and each item may be either a string + (eg. C(children=ansible) to add an empty C(<ansible/>) child element), + or a hash where the key is an element name and the value is the element value. + - This parameter requires C(xpath) to be set. + type: bool + default: no + version_added: '2.8' + insertafter: + description: + - Add additional child-element(s) after the last selected element for a given C(xpath). + - Child elements must be given in a list and each item may be either a string + (eg. C(children=ansible) to add an empty C(<ansible/>) child element), + or a hash where the key is an element name and the value is the element value. + - This parameter requires C(xpath) to be set. + type: bool + default: no + version_added: '2.8' +requirements: +- lxml >= 2.3.0 +notes: +- Use the C(--check) and C(--diff) options when testing your expressions. +- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure. +- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions. +- Beware that in case your XML elements are namespaced, you need to use the C(namespaces) parameter, see the examples. 
+- A namespace prefix should be used for all children of an element in which a namespace is defined, unless another namespace is defined for them.
+seealso:
+- name: Xml module development community wiki
+  description: More information related to the development of this xml module.
+  link: https://github.com/ansible/community/wiki/Module:-xml
+- name: Introduction to XPath
+  description: A brief tutorial on XPath (w3schools.com).
+  link: https://www.w3schools.com/xml/xpath_intro.asp
+- name: XPath Reference document
+  description: The reference documentation on XSLT/XPath (developer.mozilla.org).
+  link: https://developer.mozilla.org/en-US/docs/Web/XPath
+author:
+- Tim Bielawa (@tbielawa)
+- Magnus Hedemark (@magnus919)
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+# Consider the following XML file:
+#
+# <business type="bar">
+#   <name>Tasty Beverage Co.</name>
+#   <beers>
+#     <beer>Rochefort 10</beer>
+#     <beer>St. Bernardus Abbot 12</beer>
+#     <beer>Schlitz</beer>
+#   </beers>
+#   <rating subjective="true">10</rating>
+#   <website>
+#     <mobilefriendly/>
+#     <address>http://tastybeverageco.com</address>
+#   </website>
+# </business>
+
+- name: Remove the 'subjective' attribute of the 'rating' element
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/rating/@subjective
+    state: absent
+
+- name: Set the rating to '11'
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/rating
+    value: 11
+
+# Retrieve and display the number of nodes
+- name: Get count of 'beers' nodes
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/beers/beer
+    count: yes
+  register: hits
+
+- debug:
+    var: hits.count
+
+# Example where parent XML nodes are created automatically
+- name: Add a 'phonenumber' element to the 'business' element
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/phonenumber
+    value: 555-555-1234
+
+- name: Add several more beers to the 'beers' element
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/beers
+    add_children:
+    - beer: Old Rasputin
+    - beer: Old Motor Oil
+    - beer: Old Curmudgeon
+
+- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element
+  xml:
+    path: /foo/bar.xml
+    xpath: '/business/beers/beer[text()="Rochefort 10"]'
+    insertbefore: yes
+    add_children:
+    - beer: Old Rasputin
+    - beer: Old Motor Oil
+    - beer: Old Curmudgeon
+
+# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements
+- name: Add a 'validxhtml' element to the 'website' element
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website/validxhtml
+
+- name: Add an empty 'validatedon' attribute to the 'validxhtml' element
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website/validxhtml/@validatedon
+
+- name: Add or modify an attribute, add element if needed
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website/validxhtml
+    attribute: validatedon
+    value: 1976-08-05
+
+# How to read an attribute value and access it in Ansible
+- name: Read an element's attribute values
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website/validxhtml
+    content: attribute
+  register: xmlresp
+
+- name: Show an attribute value
+  debug:
+    var: xmlresp.matches[0].validxhtml.validatedon
+
+- name: Remove all children from the 'website' element (option 1)
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website/*
+    state: absent
+
+- name: Remove all children from the 'website' element (option 2)
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website
+    set_children: []
+
+# In case of namespaces, like in the XML below, they have to be explicitly stated.
+#
+# <foo xmlns="http://x.test" xmlns:attr="http://z.test">
+#   <bar>
+#     <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
+#   </bar>
+# </foo>
+
+# NOTE: There is the prefix 'x' in front of the 'bar' element, too.
+- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
+  xml:
+    path: foo.xml
+    xpath: /x:foo/x:bar/y:baz
+    namespaces:
+      x: http://x.test
+      y: http://y.test
+      z: http://z.test
+    attribute: z:my_namespaced_attribute
+    value: 'false'
+'''

+RETURN = r'''
+actions:
+    description: A dictionary with the original xpath, namespaces and state.
+    type: dict
+    returned: success
+    sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
+backup_file:
+    description: The name of the backup file that was created.
+    type: str
+    returned: when backup=yes
+    sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
+count:
+    description: The count of xpath matches.
+    type: int
+    returned: when parameter 'count' is set
+    sample: 2
+matches:
+    description: The xpath matches found.
+    type: list
+    returned: when parameter 'print_match' is set
+msg:
+    description: A message related to the performed action(s).
+    type: str
+    returned: always
+xmlstring:
+    description: An XML string of the resulting output.
+    type: str
+    returned: when parameter 'xmlstring' is set
+'''
+
+import copy
+import json
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+from io import BytesIO
+
+LXML_IMP_ERR = None
+try:
+    from lxml import etree, objectify
+    HAS_LXML = True
+except ImportError:
+    LXML_IMP_ERR = traceback.format_exc()
+    HAS_LXML = False
+
+from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
+_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
+# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate
+# strings wrapped by the other delimiter' XPath trick, especially as simple XPath.
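+# Editor's note: an illustrative sketch (not part of the original module) of the
+# "simple XPath" shapes the regular expressions below are designed to split.
+# The example paths are assumptions based on the EXAMPLES section above:
+#   /business/name                              -> element must exist
+#   /business/name/text()='Tasty Beverage Co.'  -> element with given inner text
+#   /business/rating/@subjective                -> attribute must exist
+#   /business/rating/@subjective='true'         -> attribute with given value
+#   /business/beers/beer[text()="Schlitz"]      -> trailing predicate sub-expression
+# split_xpath_last() below peels off the last path step so that
+# check_or_make_target() can recurse on the remaining prefix.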
+_XPSTR = "('(?:.*)'|\"(?:.*)\")" + +_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$") +_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$") +_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$") +_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$") +_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$") +_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$") + + +def has_changed(doc): + orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc))) + obj = etree.tostring(objectify.fromstring(etree.tostring(doc))) + return (orig_obj != obj) + + +def do_print_match(module, tree, xpath, namespaces): + match = tree.xpath(xpath, namespaces=namespaces) + match_xpaths = [] + for m in match: + match_xpaths.append(tree.getpath(m)) + match_str = json.dumps(match_xpaths) + msg = "selector '%s' match: %s" % (xpath, match_str) + finish(module, tree, xpath, namespaces, changed=False, msg=msg) + + +def count_nodes(module, tree, xpath, namespaces): + """ Return the count of nodes matching the xpath """ + hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces) + msg = "found %d nodes" % hits + finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits)) + + +def is_node(tree, xpath, namespaces): + """ Test if a given xpath matches anything and if that match is a node. + + For now we just assume you're only searching for one specific thing.""" + if xpath_matches(tree, xpath, namespaces): + # OK, it found something + match = tree.xpath(xpath, namespaces=namespaces) + if isinstance(match[0], etree._Element): + return True + + return False + + +def is_attribute(tree, xpath, namespaces): + """ Test if a given xpath matches and that match is an attribute + + An xpath attribute search will only match one item""" + if xpath_matches(tree, xpath, namespaces): + match = tree.xpath(xpath, namespaces=namespaces) + if isinstance(match[0], etree._ElementStringResult): + return True + elif isinstance(match[0], etree._ElementUnicodeResult): + return True + return False + + +def xpath_matches(tree, xpath, namespaces): + """ Test if a node exists """ + if tree.xpath(xpath, namespaces=namespaces): + return True + return False + + +def delete_xpath_target(module, tree, xpath, namespaces): + """ Delete an attribute or element from a tree """ + try: + for result in tree.xpath(xpath, namespaces=namespaces): + # Get the xpath for this result + if is_attribute(tree, xpath, namespaces): + # Delete an attribute + parent = result.getparent() + # Pop this attribute match out of the parent + # node's 'attrib' dict by using this match's + # 'attrname' attribute for the key + parent.attrib.pop(result.attrname) + elif is_node(tree, xpath, namespaces): + # Delete an element + result.getparent().remove(result) + else: + raise Exception("Impossible error") + except Exception as e: + module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e)) + else: + finish(module, tree, xpath, namespaces, changed=True) + + +def replace_children_of(children, match): + for element in list(match): + match.remove(element) + match.extend(children) + + +def set_target_children_inner(module, tree, xpath, namespaces, children, in_type): + matches = tree.xpath(xpath, namespaces=namespaces) + + # Create a list of our new children + children = children_to_nodes(module, children, in_type) + children_as_string = [etree.tostring(c) for c in children] + + changed = False + + # 
xpaths always return matches as a list, so.... + for match in matches: + # Check if elements differ + if len(list(match)) == len(children): + for idx, element in enumerate(list(match)): + if etree.tostring(element) != children_as_string[idx]: + replace_children_of(children, match) + changed = True + break + else: + replace_children_of(children, match) + changed = True + + return changed + + +def set_target_children(module, tree, xpath, namespaces, children, in_type): + changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type) + # Write it out + finish(module, tree, xpath, namespaces, changed=changed) + + +def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter): + if is_node(tree, xpath, namespaces): + new_kids = children_to_nodes(module, children, in_type) + if insertbefore or insertafter: + insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter) + else: + for node in tree.xpath(xpath, namespaces=namespaces): + node.extend(new_kids) + finish(module, tree, xpath, namespaces, changed=True) + else: + finish(module, tree, xpath, namespaces) + + +def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter): + """ + Insert the given children before or after the given xpath. If insertbefore is True, it is inserted before the + first xpath hit, with insertafter, it is inserted after the last xpath hit. + """ + insert_target = tree.xpath(xpath, namespaces=namespaces) + loc_index = 0 if insertbefore else -1 + index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index]) + parent = insert_target[0].getparent() + if insertafter: + index_in_parent += 1 + for child in children: + parent.insert(index_in_parent, child) + index_in_parent += 1 + + +def _extract_xpstr(g): + return g[1:-1] + + +def split_xpath_last(xpath): + """split an XPath of the form /foo/bar/baz into /foo/bar and baz""" + xpath = xpath.strip() + m = _RE_SPLITSIMPLELAST.match(xpath) + if m: + # requesting an element to exist + return (m.group(1), [(m.group(2), None)]) + m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath) + if m: + # requesting an element to exist with an inner text + return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) + + m = _RE_SPLITSIMPLEATTRLAST.match(xpath) + if m: + # requesting an attribute to exist + return (m.group(1), [(m.group(2), None)]) + m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath) + if m: + # requesting an attribute to exist with a value + return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) + + m = _RE_SPLITSUBLAST.match(xpath) + if m: + content = [x.strip() for x in m.group(3).split(" and ")] + return (m.group(1), [('/' + m.group(2), content)]) + + m = _RE_SPLITONLYEQVALUE.match(xpath) + if m: + # requesting a change of inner text + return (m.group(1), [("", _extract_xpstr(m.group(2)))]) + return (xpath, []) + + +def nsnameToClark(name, namespaces): + if ":" in name: + (nsname, rawname) = name.split(":") + # return "{{%s}}%s" % (namespaces[nsname], rawname) + return "{{{0}}}{1}".format(namespaces[nsname], rawname) + + # no namespace name here + return name + + +def check_or_make_target(module, tree, xpath, namespaces): + (inner_xpath, changes) = split_xpath_last(xpath) + if (inner_xpath == xpath) or (changes is None): + module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! 
tree is %s" % + (xpath, etree.tostring(tree, pretty_print=True))) + return False + + changed = False + + if not is_node(tree, inner_xpath, namespaces): + changed = check_or_make_target(module, tree, inner_xpath, namespaces) + + # we test again after calling check_or_make_target + if is_node(tree, inner_xpath, namespaces) and changes: + for (eoa, eoa_value) in changes: + if eoa and eoa[0] != '@' and eoa[0] != '/': + # implicitly creating an element + new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml") + if eoa_value: + for nk in new_kids: + nk.text = eoa_value + + for node in tree.xpath(inner_xpath, namespaces=namespaces): + node.extend(new_kids) + changed = True + # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) + elif eoa and eoa[0] == '/': + element = eoa[1:] + new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml") + for node in tree.xpath(inner_xpath, namespaces=namespaces): + node.extend(new_kids) + for nk in new_kids: + for subexpr in eoa_value: + # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" % + # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True)) + check_or_make_target(module, nk, "./" + subexpr, namespaces) + changed = True + + # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) + elif eoa == "": + for node in tree.xpath(inner_xpath, namespaces=namespaces): + if (node.text != eoa_value): + node.text = eoa_value + changed = True + + elif eoa and eoa[0] == '@': + attribute = nsnameToClark(eoa[1:], namespaces) + + for element in tree.xpath(inner_xpath, namespaces=namespaces): + changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value) + + if changing: + changed = changed or changing + if eoa_value is None: + value = "" + else: + value = eoa_value + element.attrib[attribute] = value + + # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" % + # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True))) + + else: + module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True)) + + return changed + + +def ensure_xpath_exists(module, tree, xpath, namespaces): + changed = False + + if not is_node(tree, xpath, namespaces): + changed = check_or_make_target(module, tree, xpath, namespaces) + + finish(module, tree, xpath, namespaces, changed) + + +def set_target_inner(module, tree, xpath, namespaces, attribute, value): + changed = False + + try: + if not is_node(tree, xpath, namespaces): + changed = check_or_make_target(module, tree, xpath, namespaces) + except Exception as e: + missing_namespace = "" + # NOTE: This checks only the namespaces defined in root element! + # TODO: Implement a more robust check to check for child namespaces' existence + if tree.getroot().nsmap and ":" not in xpath: + missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n" + module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" % + (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc()) + + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node! 
tree is %s" % + (xpath, etree.tostring(tree, pretty_print=True))) + + for element in tree.xpath(xpath, namespaces=namespaces): + if not attribute: + changed = changed or (element.text != value) + if element.text != value: + element.text = value + else: + changed = changed or (element.get(attribute) != value) + if ":" in attribute: + attr_ns, attr_name = attribute.split(":") + # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name) + attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name) + if element.get(attribute) != value: + element.set(attribute, value) + + return changed + + +def set_target(module, tree, xpath, namespaces, attribute, value): + changed = set_target_inner(module, tree, xpath, namespaces, attribute, value) + finish(module, tree, xpath, namespaces, changed) + + +def get_element_text(module, tree, xpath, namespaces): + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node!" % xpath) + + elements = [] + for element in tree.xpath(xpath, namespaces=namespaces): + elements.append({element.tag: element.text}) + + finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) + + +def get_element_attr(module, tree, xpath, namespaces): + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node!" % xpath) + + elements = [] + for element in tree.xpath(xpath, namespaces=namespaces): + child = {} + for key in element.keys(): + value = element.get(key) + child.update({key: value}) + elements.append({element.tag: child}) + + finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) + + +def child_to_element(module, child, in_type): + if in_type == 'xml': + infile = BytesIO(to_bytes(child, errors='surrogate_or_strict')) + + try: + parser = etree.XMLParser() + node = etree.parse(infile, parser) + return node.getroot() + except etree.XMLSyntaxError as e: + module.fail_json(msg="Error while parsing child element: %s" % e) + elif in_type == 'yaml': + if isinstance(child, string_types): + return etree.Element(child) + elif isinstance(child, MutableMapping): + if len(child) > 1: + module.fail_json(msg="Can only create children from hashes with one key") + + (key, value) = next(iteritems(child)) + if isinstance(value, MutableMapping): + children = value.pop('_', None) + + node = etree.Element(key, value) + + if children is not None: + if not isinstance(children, list): + module.fail_json(msg="Invalid children type: %s, must be list." % type(children)) + + subnodes = children_to_nodes(module, children) + node.extend(subnodes) + else: + node = etree.Element(key) + node.text = value + return node + else: + module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child)) + else: + module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." 
% in_type) + + +def children_to_nodes(module=None, children=None, type='yaml'): + """turn a str/hash/list of str&hash into a list of elements""" + children = [] if children is None else children + + return [child_to_element(module, child, type) for child in children] + + +def make_pretty(module, tree): + xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + result = dict( + changed=False, + ) + + if module.params['path']: + xml_file = module.params['path'] + with open(xml_file, 'rb') as xml_content: + if xml_string != xml_content.read(): + result['changed'] = True + if not module.check_mode: + if module.params['backup']: + result['backup_file'] = module.backup_local(module.params['path']) + tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + elif module.params['xmlstring']: + result['xmlstring'] = xml_string + # NOTE: Modifying a string is not considered a change ! + if xml_string != module.params['xmlstring']: + result['changed'] = True + + module.exit_json(**result) + + +def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()): + + result = dict( + actions=dict( + xpath=xpath, + namespaces=namespaces, + state=module.params['state'] + ), + changed=has_changed(tree), + ) + + if module.params['count'] or hitcount: + result['count'] = hitcount + + if module.params['print_match'] or matches: + result['matches'] = matches + + if msg: + result['msg'] = msg + + if result['changed']: + if module._diff: + result['diff'] = dict( + before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True), + after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True), + ) + + if module.params['path'] and not module.check_mode: + if module.params['backup']: + result['backup_file'] = module.backup_local(module.params['path']) + tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + if module.params['xmlstring']: + result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + module.exit_json(**result) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', aliases=['dest', 'file']), + xmlstring=dict(type='str'), + xpath=dict(type='str'), + namespaces=dict(type='dict', default={}), + state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']), + value=dict(type='raw'), + attribute=dict(type='raw'), + add_children=dict(type='list'), + set_children=dict(type='list'), + count=dict(type='bool', default=False), + print_match=dict(type='bool', default=False), + pretty_print=dict(type='bool', default=False), + content=dict(type='str', choices=['attribute', 'text']), + input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']), + backup=dict(type='bool', default=False), + strip_cdata_tags=dict(type='bool', default=False), + insertbefore=dict(type='bool', default=False), + insertafter=dict(type='bool', default=False), + ), + supports_check_mode=True, + required_by=dict( + add_children=['xpath'], + # TODO: Reinstate this in Ansible v2.12 when we have deprecated the incorrect use below + # attribute=['value'], + content=['xpath'], + set_children=['xpath'], + value=['xpath'], + ), + required_if=[ + ['count', True, ['xpath']], + ['print_match', True, ['xpath']], + ['insertbefore', True, ['xpath']], + 
['insertafter', True, ['xpath']], + ], + required_one_of=[ + ['path', 'xmlstring'], + ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'], + ], + mutually_exclusive=[ + ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'], + ['path', 'xmlstring'], + ['insertbefore', 'insertafter'], + ], + ) + + xml_file = module.params['path'] + xml_string = module.params['xmlstring'] + xpath = module.params['xpath'] + namespaces = module.params['namespaces'] + state = module.params['state'] + value = json_dict_bytes_to_unicode(module.params['value']) + attribute = module.params['attribute'] + set_children = json_dict_bytes_to_unicode(module.params['set_children']) + add_children = json_dict_bytes_to_unicode(module.params['add_children']) + pretty_print = module.params['pretty_print'] + content = module.params['content'] + input_type = module.params['input_type'] + print_match = module.params['print_match'] + count = module.params['count'] + backup = module.params['backup'] + strip_cdata_tags = module.params['strip_cdata_tags'] + insertbefore = module.params['insertbefore'] + insertafter = module.params['insertafter'] + + # Check if we have lxml 2.3.0 or newer installed + if not HAS_LXML: + module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR) + elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'): + module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine') + elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'): + module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.') + + # Report wrongly used attribute parameter when using content=attribute + # TODO: Remove this in Ansible v2.12 (and reinstate strict parameter test above) and remove the integration test example + if content == 'attribute' and attribute is not None: + module.deprecate("Parameter 'attribute=%s' is ignored when using 'content=attribute' only 'xpath' is used. Please remove entry." % attribute, + '2.12', collection_name='ansible.builtin') + + # Check if the file exists + if xml_string: + infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict')) + elif os.path.isfile(xml_file): + infile = open(xml_file, 'rb') + else: + module.fail_json(msg="The target XML source '%s' does not exist." 
% xml_file) + + # Parse and evaluate xpath expression + if xpath is not None: + try: + etree.XPath(xpath) + except etree.XPathSyntaxError as e: + module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e)) + except etree.XPathEvalError as e: + module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e)) + + # Try to parse in the target XML file + try: + parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags) + doc = etree.parse(infile, parser) + except etree.XMLSyntaxError as e: + module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e)) + + # Ensure we have the original copy to compare + global orig_doc + orig_doc = copy.deepcopy(doc) + + if print_match: + do_print_match(module, doc, xpath, namespaces) + + if count: + count_nodes(module, doc, xpath, namespaces) + + if content == 'attribute': + get_element_attr(module, doc, xpath, namespaces) + elif content == 'text': + get_element_text(module, doc, xpath, namespaces) + + # File exists: + if state == 'absent': + # - absent: delete xpath target + delete_xpath_target(module, doc, xpath, namespaces) + + # - present: carry on + + # children && value both set?: should have already aborted by now + # add_children && set_children both set?: should have already aborted by now + + # set_children set? + if set_children: + set_target_children(module, doc, xpath, namespaces, set_children, input_type) + + # add_children set? + if add_children: + add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter) + + # No?: Carry on + + # Is the xpath target an attribute selector? + if value is not None: + set_target(module, doc, xpath, namespaces, attribute, value) + + # If an xpath was provided, we need to do something with the data + if xpath is not None: + ensure_xpath_exists(module, doc, xpath, namespaces) + + # Otherwise only reformat the xml data? + if pretty_print: + make_pretty(module, doc) + + module.fail_json(msg="Don't know what to do") + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/zypper.py b/test/support/integration/plugins/modules/zypper.py new file mode 100644 index 00000000..bfb31819 --- /dev/null +++ b/test/support/integration/plugins/modules/zypper.py @@ -0,0 +1,540 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com> +# based on +# openbsd_pkg +# (c) 2013 +# Patrik Lundin <patrik.lundin.swe@gmail.com> +# +# yum +# (c) 2012, Red Hat, Inc +# Written by Seth Vidal <skvidal at fedoraproject.org> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: zypper +author: + - "Patrick Callahan (@dirtyharrycallahan)" + - "Alexander Gubin (@alxgu)" + - "Thomas O'Donnell (@andytom)" + - "Robin Roth (@robinro)" + - "Andrii Radyk (@AnderEnder)" +version_added: "1.2" +short_description: Manage packages on SUSE and openSUSE +description: + - Manage packages on SUSE and openSUSE using the zypper and rpm tools. +options: + name: + description: + - Package name C(name) or package specifier or a list of either. + - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). 
If a version is given, C(oldpackage) is implied and zypper is allowed to
+            update the package within the version range given.
+        - You can also pass a url or a local path to a rpm file.
+        - When using state=latest, this can be '*', which updates all installed packages.
+    required: true
+    aliases: [ 'pkg' ]
+  state:
+    description:
+      - C(present) will make sure the package is installed.
+        C(latest) will make sure the latest version of the package is installed.
+        C(absent) will make sure the specified package is not installed.
+        C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+      - When using C(dist-upgrade), I(name) should be C('*').
+    required: false
+    choices: [ present, latest, absent, dist-upgrade ]
+    default: "present"
+  type:
+    description:
+      - The type of package to be operated on.
+    required: false
+    choices: [ package, patch, pattern, product, srcpackage, application ]
+    default: "package"
+    version_added: "2.0"
+  extra_args_precommand:
+    version_added: "2.6"
+    required: false
+    description:
+      - Add additional global target options to C(zypper).
+      - Options should be supplied in a single line as if given in the command line.
+  disable_gpg_check:
+    description:
+      - Whether to disable GPG signature checking of the package
+        being installed. Has an effect only if state is
+        I(present) or I(latest).
+    required: false
+    default: "no"
+    type: bool
+  disable_recommends:
+    version_added: "1.8"
+    description:
+      - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) prevents
+        recommended packages from being installed; C(no) lets zypper install recommended packages.
+    required: false
+    default: "yes"
+    type: bool
+  force:
+    version_added: "2.2"
+    description:
+      - Adds C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
+    required: false
+    default: "no"
+    type: bool
+  force_resolution:
+    version_added: "2.10"
+    description:
+      - Adds C(--force-resolution) option to I(zypper). Allows (un)installing packages with conflicting requirements (the resolver will choose a solution).
+    required: false
+    default: "no"
+    type: bool
+  update_cache:
+    version_added: "2.2"
+    description:
+      - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
+    required: false
+    default: "no"
+    type: bool
+    aliases: [ "refresh" ]
+  oldpackage:
+    version_added: "2.2"
+    description:
+      - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than force. This is implied as soon as a
+        version is specified as part of the package name.
+    required: false
+    default: "no"
+    type: bool
+  extra_args:
+    version_added: "2.4"
+    required: false
+    description:
+      - Add additional options to C(zypper) command.
+      - Options should be supplied in a single line as if given in the command line.
+notes:
+  - When used with a C(loop:), each package will be processed individually; it is much more
+    efficient to pass the list directly to the I(name) option.
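+# Editor's note (illustrative, not part of the original module docs): passing the
+# list directly results in a single zypper transaction, e.g.
+#
+#   - zypper:
+#       name: ['apache2', 'nmap', 'mosh']
+#       state: present
+#
+# whereas a loop would run zypper once per package.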
+# informational: requirements for nodes +requirements: + - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0" + - python-xml + - rpm +''' + +EXAMPLES = ''' +# Install "nmap" +- zypper: + name: nmap + state: present + +# Install apache2 with recommended packages +- zypper: + name: apache2 + state: present + disable_recommends: no + +# Apply a given patch +- zypper: + name: openSUSE-2016-128 + state: present + type: patch + +# Remove the "nmap" package +- zypper: + name: nmap + state: absent + +# Install the nginx rpm from a remote repo +- zypper: + name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm' + state: present + +# Install local rpm file +- zypper: + name: /tmp/fancy-software.rpm + state: present + +# Update all packages +- zypper: + name: '*' + state: latest + +# Apply all available patches +- zypper: + name: '*' + state: latest + type: patch + +# Perform a dist-upgrade with additional arguments +- zypper: + name: '*' + state: dist-upgrade + extra_args: '--no-allow-vendor-change --allow-arch-change' + +# Refresh repositories and update package "openssl" +- zypper: + name: openssl + state: present + update_cache: yes + +# Install specific version (possible comparisons: <, >, <=, >=, =) +- zypper: + name: 'docker>=1.10' + state: present + +# Wait 20 seconds to acquire the lock before failing +- zypper: + name: mosh + state: present + environment: + ZYPP_LOCK_TIMEOUT: 20 +''' + +import xml +import re +from xml.dom.minidom import parseString as parseXML +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + +# import module snippets +from ansible.module_utils.basic import AnsibleModule + + +class Package: + def __init__(self, name, prefix, version): + self.name = name + self.prefix = prefix + self.version = version + self.shouldinstall = (prefix == '+') + + def __str__(self): + return self.prefix + self.name + self.version + + +def split_name_version(name): + """splits of the package name and desired version + + example formats: + - docker>=1.10 + - apache=2.4 + + Allowed version specifiers: <, >, <=, >=, = + Allowed version format: [0-9.-]* + + Also allows a prefix indicating remove "-", "~" or install "+" + """ + + prefix = '' + if name[0] in ['-', '~', '+']: + prefix = name[0] + name = name[1:] + if prefix == '~': + prefix = '-' + + version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$') + try: + reres = version_check.match(name) + name, version = reres.groups() + if version is None: + version = '' + return prefix, name, version + except Exception: + return prefix, name, '' + + +def get_want_state(names, remove=False): + packages = [] + urls = [] + for name in names: + if '://' in name or name.endswith('.rpm'): + urls.append(name) + else: + prefix, pname, version = split_name_version(name) + if prefix not in ['-', '+']: + if remove: + prefix = '-' + else: + prefix = '+' + packages.append(Package(pname, prefix, version)) + return packages, urls + + +def get_installed_state(m, packages): + "get installed state of packages" + + cmd = get_cmd(m, 'search') + cmd.extend(['--match-exact', '--details', '--installed-only']) + cmd.extend([p.name for p in packages]) + return parse_zypper_xml(m, cmd, fail_not_found=False)[0] + + +def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): + rc, stdout, stderr = m.run_command(cmd, check_rc=False) + + try: + dom = parseXML(stdout) + except xml.parsers.expat.ExpatError as exc: + m.fail_json(msg="Failed to 
parse zypper xml output: %s" % to_native(exc),
+                    rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+    if rc == 104:
+        # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
+        if fail_not_found:
+            errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
+            m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+        else:
+            return {}, rc, stdout, stderr
+    elif rc in [0, 106, 103]:
+        # zypper exit codes
+        # 0: success
+        # 106: signature verification failed
+        # 103: zypper was upgraded, run same command again
+        if packages is None:
+            firstrun = True
+            packages = {}
+        else:
+            # this is already the re-run after a 103: do not recurse again
+            firstrun = False
+        solvable_list = dom.getElementsByTagName('solvable')
+        for solvable in solvable_list:
+            name = solvable.getAttribute('name')
+            packages[name] = {}
+            packages[name]['version'] = solvable.getAttribute('edition')
+            packages[name]['oldversion'] = solvable.getAttribute('edition-old')
+            status = solvable.getAttribute('status')
+            packages[name]['installed'] = status == "installed"
+            packages[name]['group'] = solvable.parentNode.nodeName
+        if rc == 103 and firstrun:
+            # if this was the first run and it failed with 103
+            # run zypper again with the same command to complete update
+            return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
+
+        return packages, rc, stdout, stderr
+    m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+
+def get_cmd(m, subcommand):
+    "puts together the basic zypper command arguments with those passed to the module"
+    is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade']
+    is_refresh = subcommand == 'refresh'
+    cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout']
+    if m.params['extra_args_precommand']:
+        args_list = m.params['extra_args_precommand'].split()
+        cmd.extend(args_list)
+    # add global options before zypper command
+    if (is_install or is_refresh) and m.params['disable_gpg_check']:
+        cmd.append('--no-gpg-checks')
+
+    if subcommand == 'search':
+        cmd.append('--disable-repositories')
+
+    cmd.append(subcommand)
+    if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh:
+        cmd.extend(['--type', m.params['type']])
+    if m.check_mode and subcommand != 'search':
+        cmd.append('--dry-run')
+    if is_install:
+        cmd.append('--auto-agree-with-licenses')
+        if m.params['disable_recommends']:
+            cmd.append('--no-recommends')
+        if m.params['force']:
+            cmd.append('--force')
+        if m.params['force_resolution']:
+            cmd.append('--force-resolution')
+        if m.params['oldpackage']:
+            cmd.append('--oldpackage')
+    if m.params['extra_args']:
+        args_list = m.params['extra_args'].split(' ')
+        cmd.extend(args_list)
+
+    return cmd
+
+
+def set_diff(m, retvals, result):
+    # TODO: if there is only one package, set before/after to version numbers
+    packages = {'installed': [], 'removed': [], 'upgraded': []}
+    if result:
+        for p in result:
+            group = result[p]['group']
+            if group == 'to-upgrade':
+                versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+                packages['upgraded'].append(p + versions)
+            elif group == 'to-install':
+                packages['installed'].append(p)
+            elif group == 'to-remove':
+                packages['removed'].append(p)
+
+    output = ''
+    for state in packages:
+        if packages[state]:
+            output += state + ': ' + ', '.join(packages[state]) + '\n'
+    if 'diff' not in retvals:
+        retvals['diff'] = {}
+    if 'prepared' not in retvals['diff']:
+        retvals['diff']['prepared'] = output
+    else:
+        retvals['diff']['prepared'] += '\n' + output
+
+
+def package_present(m, name,
want_latest): + "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + packages, urls = get_want_state(name) + + # add oldpackage flag when a version is given to allow downgrades + if any(p.version for p in packages): + m.params['oldpackage'] = True + + if not want_latest: + # for state=present: filter out already installed packages + # if a version is given leave the package in to let zypper handle the version + # resolution + packageswithoutversion = [p for p in packages if not p.version] + prerun_state = get_installed_state(m, packageswithoutversion) + # generate lists of packages to install or remove + packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)] + + if not packages and not urls: + # nothing to install/remove and nothing to update + return None, retvals + + # zypper install also updates packages + cmd = get_cmd(m, 'install') + cmd.append('--') + cmd.extend(urls) + # pass packages to zypper + # allow for + or - prefixes in install/remove lists + # also add version specifier if given + # do this in one zypper run to allow for dependency-resolution + # for example "-exim postfix" runs without removing packages depending on mailserver + cmd.extend([str(p) for p in packages]) + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + + return result, retvals + + +def package_update_all(m): + "run update or patch on all available packages" + + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + if m.params['type'] == 'patch': + cmdname = 'patch' + elif m.params['state'] == 'dist-upgrade': + cmdname = 'dist-upgrade' + else: + cmdname = 'update' + + cmd = get_cmd(m, cmdname) + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals + + +def package_absent(m, name): + "remove the packages in name" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + # Get package state + packages, urls = get_want_state(name, remove=True) + if any(p.prefix == '+' for p in packages): + m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.") + if urls: + m.fail_json(msg="Can not remove via URL.") + if m.params['type'] == 'patch': + m.fail_json(msg="Can not remove patches.") + prerun_state = get_installed_state(m, packages) + packages = [p for p in packages if p.name in prerun_state] + + if not packages: + return None, retvals + + cmd = get_cmd(m, 'remove') + cmd.extend([p.name + p.version for p in packages]) + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals + + +def repo_refresh(m): + "update the repositories" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + + cmd = get_cmd(m, 'refresh') + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + + return retvals + +# =========================================== +# Main control flow + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['pkg'], type='list'), + state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']), + type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), + extra_args_precommand=dict(required=False, default=None), + 
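+            # Editor's note: the extra choices 'installed' and 'removed' in the
+            # 'state' option above are accepted as undocumented aliases of
+            # 'present' and 'absent' (see the state dispatch later in main()).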
disable_gpg_check=dict(required=False, default='no', type='bool'),
+            disable_recommends=dict(required=False, default='yes', type='bool'),
+            force=dict(required=False, default='no', type='bool'),
+            force_resolution=dict(required=False, default='no', type='bool'),
+            update_cache=dict(required=False, aliases=['refresh'], default='no', type='bool'),
+            oldpackage=dict(required=False, default='no', type='bool'),
+            extra_args=dict(required=False, default=None),
+        ),
+        supports_check_mode=True
+    )
+
+    name = module.params['name']
+    state = module.params['state']
+    update_cache = module.params['update_cache']
+
+    # remove empty strings from package list
+    name = list(filter(None, name))
+
+    # Refresh repositories
+    if update_cache and not module.check_mode:
+        retvals = repo_refresh(module)
+
+        if retvals['rc'] != 0:
+            module.fail_json(msg="Zypper refresh run failed.", **retvals)
+
+    # Perform requested action
+    if name == ['*'] and state in ['latest', 'dist-upgrade']:
+        packages_changed, retvals = package_update_all(module)
+    elif name != ['*'] and state == 'dist-upgrade':
+        module.fail_json(msg="Cannot dist-upgrade specific packages.")
+    else:
+        if state in ['absent', 'removed']:
+            packages_changed, retvals = package_absent(module, name)
+        elif state in ['installed', 'present', 'latest']:
+            packages_changed, retvals = package_present(module, name, state == 'latest')
+
+    retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
+
+    if module._diff:
+        set_diff(module, retvals, packages_changed)
+
+    if retvals['rc'] != 0:
+        module.fail_json(msg="Zypper run failed.", **retvals)
+
+    if not retvals['changed']:
+        del retvals['stdout']
+        del retvals['stderr']
+
+    module.exit_json(name=name, state=state, update_cache=update_cache, **retvals)
+
+
+if __name__ == "__main__":
+    main()