| field | value | date |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-18 05:52:35 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-18 05:52:35 +0000 |
| commit | 7fec0b69a082aaeec72fee0612766aa42f6b1b4d | |
| tree | efb569b86ca4da888717f5433e757145fa322e08 /ansible_collections/community/aws/plugins/modules/elasticache.py | |
| parent | Releasing progress-linux version 7.7.0+dfsg-3~progress7.99u1. | |
Merging upstream version 9.4.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/community/aws/plugins/modules/elasticache.py')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | ansible_collections/community/aws/plugins/modules/elasticache.py | 345 |

1 file changed, 174 insertions, 171 deletions
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache.py b/ansible_collections/community/aws/plugins/modules/elasticache.py
index 454baafe3..d45509cb6 100644
--- a/ansible_collections/community/aws/plugins/modules/elasticache.py
+++ b/ansible_collections/community/aws/plugins/modules/elasticache.py
@@ -1,13 +1,10 @@
 #!/usr/bin/python
-#
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: elasticache
 version_added: 1.0.0
@@ -15,7 +12,8 @@ short_description: Manage cache clusters in Amazon ElastiCache
 description:
   - Manage cache clusters in Amazon ElastiCache.
   - Returns information about the specified cache cluster.
-author: "Jim Dalton (@jsdalton)"
+author:
+  - "Jim Dalton (@jsdalton)"
 options:
   state:
     description:
@@ -97,15 +95,15 @@ options:
       - Defaults to C(false).
     type: bool
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
 
-'''
+RETURN = r""" # """
 
 EXAMPLES = r"""
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
+# Note: These examples do not set authentication details, see the AWS Guide for details.
 
 - name: Basic example
   community.aws.elasticache:
@@ -113,7 +111,7 @@ EXAMPLES = r"""
     name: "test-please-delete"
     state: present
     engine: memcached
     cache_engine_version: 1.4.14
-    node_type: cache.m1.small
+    node_type: cache.m3.small
     num_nodes: 1
     cache_port: 11211
     cache_security_groups:
@@ -130,8 +128,8 @@ EXAMPLES = r"""
   community.aws.elasticache:
     name: "test-please-delete"
     state: rebooted
-
 """
+
 from time import sleep
 
 try:
@@ -139,21 +137,34 @@ try:
 except ImportError:
     pass  # Handled by AnsibleAWSModule
 
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 
 
-class ElastiCacheManager(object):
-    """Handles elasticache creation and destruction"""
+
+class ElastiCacheManager:
 
-    EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
+    """Handles elasticache creation and destruction"""
 
-    def __init__(self, module, name, engine, cache_engine_version, node_type,
-                 num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
-                 cache_security_groups, security_group_ids, zone, wait,
-                 hard_modify, region, **aws_connect_kwargs):
+    EXIST_STATUSES = ["available", "creating", "rebooting", "modifying"]
+
+    def __init__(
+        self,
+        module,
+        name,
+        engine,
+        cache_engine_version,
+        node_type,
+        num_nodes,
+        cache_port,
+        cache_parameter_group,
+        cache_subnet_group,
+        cache_security_groups,
+        security_group_ids,
+        zone,
+        wait,
+        hard_modify,
+    ):
         self.module = module
         self.name = name
         self.engine = engine.lower()
@@ -169,12 +180,9 @@ class ElastiCacheManager(object):
         self.wait = wait
         self.hard_modify = hard_modify
 
-        self.region = region
-        self.aws_connect_kwargs = aws_connect_kwargs
-
         self.changed = False
         self.data = None
-        self.status = 'gone'
+        self.status = "gone"
         self.conn = self._get_elasticache_connection()
         self._refresh_data()
 
@@ -199,32 +207,33 @@ class ElastiCacheManager(object):
 
     def create(self):
         """Create an ElastiCache cluster"""
-        if self.status == 'available':
+        if self.status == "available":
             return
-        if self.status in ['creating', 'rebooting', 'modifying']:
+        if self.status in ["creating", "rebooting", "modifying"]:
             if self.wait:
-                self._wait_for_status('available')
+                self._wait_for_status("available")
             return
-        if self.status == 'deleting':
+        if self.status == "deleting":
             if self.wait:
-                self._wait_for_status('gone')
+                self._wait_for_status("gone")
             else:
-                msg = "'%s' is currently deleting. Cannot create."
-                self.module.fail_json(msg=msg % self.name)
-
-        kwargs = dict(CacheClusterId=self.name,
-                      NumCacheNodes=self.num_nodes,
-                      CacheNodeType=self.node_type,
-                      Engine=self.engine,
-                      EngineVersion=self.cache_engine_version,
-                      CacheSecurityGroupNames=self.cache_security_groups,
-                      SecurityGroupIds=self.security_group_ids,
-                      CacheParameterGroupName=self.cache_parameter_group,
-                      CacheSubnetGroupName=self.cache_subnet_group)
+                self.module.fail_json(msg=f"'{self.name}' is currently deleting. Cannot create.")
+
+        kwargs = dict(
+            CacheClusterId=self.name,
+            NumCacheNodes=self.num_nodes,
+            CacheNodeType=self.node_type,
+            Engine=self.engine,
+            EngineVersion=self.cache_engine_version,
+            CacheSecurityGroupNames=self.cache_security_groups,
+            SecurityGroupIds=self.security_group_ids,
+            CacheParameterGroupName=self.cache_parameter_group,
+            CacheSubnetGroupName=self.cache_subnet_group,
+        )
         if self.cache_port is not None:
-            kwargs['Port'] = self.cache_port
+            kwargs["Port"] = self.cache_port
         if self.zone is not None:
-            kwargs['PreferredAvailabilityZone'] = self.zone
+            kwargs["PreferredAvailabilityZone"] = self.zone
 
         try:
             self.conn.create_cache_cluster(**kwargs)
@@ -236,45 +245,43 @@ class ElastiCacheManager(object):
 
         self.changed = True
         if self.wait:
-            self._wait_for_status('available')
+            self._wait_for_status("available")
         return True
 
     def delete(self):
         """Destroy an ElastiCache cluster"""
-        if self.status == 'gone':
+        if self.status == "gone":
             return
-        if self.status == 'deleting':
+        if self.status == "deleting":
             if self.wait:
-                self._wait_for_status('gone')
+                self._wait_for_status("gone")
             return
-        if self.status in ['creating', 'rebooting', 'modifying']:
+        if self.status in ["creating", "rebooting", "modifying"]:
             if self.wait:
-                self._wait_for_status('available')
+                self._wait_for_status("available")
             else:
-                msg = "'%s' is currently %s. Cannot delete."
-                self.module.fail_json(msg=msg % (self.name, self.status))
+                self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot delete.")
 
         try:
             response = self.conn.delete_cache_cluster(CacheClusterId=self.name)
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             self.module.fail_json_aws(e, msg="Failed to delete cache cluster")
 
-        cache_cluster_data = response['CacheCluster']
+        cache_cluster_data = response["CacheCluster"]
         self._refresh_data(cache_cluster_data)
 
         self.changed = True
         if self.wait:
-            self._wait_for_status('gone')
+            self._wait_for_status("gone")
 
     def sync(self):
         """Sync settings to cluster if required"""
         if not self.exists():
-            msg = "'%s' is %s. Cannot sync."
-            self.module.fail_json(msg=msg % (self.name, self.status))
+            self.module.fail_json(msg=f"'{self.name}' is {self.status}. Cannot sync.")
 
-        if self.status in ['creating', 'rebooting', 'modifying']:
+        if self.status in ["creating", "rebooting", "modifying"]:
             if self.wait:
-                self._wait_for_status('available')
+                self._wait_for_status("available")
             else:
                 # Cluster can only be synced if available. If we can't wait
                 # for this, then just be done.
@@ -282,11 +289,13 @@ class ElastiCacheManager(object):
 
         if self._requires_destroy_and_create():
             if not self.hard_modify:
-                msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
-                self.module.fail_json(msg=msg % self.name)
+                self.module.fail_json(
+                    msg=f"'{self.name}' requires destructive modification. 'hard_modify' must be set to true to proceed."
+                )
             if not self.wait:
-                msg = "'%s' requires destructive modification. 'wait' must be set to true."
-                self.module.fail_json(msg=msg % self.name)
+                self.module.fail_json(
+                    msg=f"'{self.name}' requires destructive modification. 'wait' must be set to true to proceed."
+                )
             self.delete()
             self.create()
             return
@@ -298,14 +307,16 @@ class ElastiCacheManager(object):
         """Modify the cache cluster. Note it's only possible to modify a few select options."""
         nodes_to_remove = self._get_nodes_to_remove()
         try:
-            self.conn.modify_cache_cluster(CacheClusterId=self.name,
-                                           NumCacheNodes=self.num_nodes,
-                                           CacheNodeIdsToRemove=nodes_to_remove,
-                                           CacheSecurityGroupNames=self.cache_security_groups,
-                                           CacheParameterGroupName=self.cache_parameter_group,
-                                           SecurityGroupIds=self.security_group_ids,
-                                           ApplyImmediately=True,
-                                           EngineVersion=self.cache_engine_version)
+            self.conn.modify_cache_cluster(
+                CacheClusterId=self.name,
+                NumCacheNodes=self.num_nodes,
+                CacheNodeIdsToRemove=nodes_to_remove,
+                CacheSecurityGroupNames=self.cache_security_groups,
+                CacheParameterGroupName=self.cache_parameter_group,
+                SecurityGroupIds=self.security_group_ids,
+                ApplyImmediately=True,
+                EngineVersion=self.cache_engine_version,
+            )
         except botocore.exceptions.ClientError as e:
             self.module.fail_json_aws(e, msg="Failed to modify cache cluster")
 
@@ -313,27 +324,24 @@ class ElastiCacheManager(object):
 
         self.changed = True
         if self.wait:
-            self._wait_for_status('available')
+            self._wait_for_status("available")
 
     def reboot(self):
         """Reboot the cache cluster"""
         if not self.exists():
-            msg = "'%s' is %s. Cannot reboot."
-            self.module.fail_json(msg=msg % (self.name, self.status))
-        if self.status == 'rebooting':
+            self.module.fail_json(msg=f"'{self.name}' is {self.status}. Cannot reboot.")
+        if self.status == "rebooting":
             return
-        if self.status in ['creating', 'modifying']:
+        if self.status in ["creating", "modifying"]:
             if self.wait:
-                self._wait_for_status('available')
+                self._wait_for_status("available")
             else:
-                msg = "'%s' is currently %s. Cannot reboot."
-                self.module.fail_json(msg=msg % (self.name, self.status))
+                self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot reboot.")
 
         # Collect ALL nodes for reboot
-        cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
+        cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]]
         try:
-            self.conn.reboot_cache_cluster(CacheClusterId=self.name,
-                                           CacheNodeIdsToReboot=cache_node_ids)
+            self.conn.reboot_cache_cluster(CacheClusterId=self.name, CacheNodeIdsToReboot=cache_node_ids)
         except botocore.exceptions.ClientError as e:
             self.module.fail_json_aws(e, msg="Failed to reboot cache cluster")
 
@@ -341,36 +349,28 @@ class ElastiCacheManager(object):
 
         self.changed = True
         if self.wait:
-            self._wait_for_status('available')
+            self._wait_for_status("available")
 
     def get_info(self):
         """Return basic info about the cache cluster"""
-        info = {
-            'name': self.name,
-            'status': self.status
-        }
+        info = {"name": self.name, "status": self.status}
         if self.data:
-            info['data'] = self.data
+            info["data"] = self.data
         return info
 
     def _wait_for_status(self, awaited_status):
         """Wait for status to change from present status to awaited_status"""
-        status_map = {
-            'creating': 'available',
-            'rebooting': 'available',
-            'modifying': 'available',
-            'deleting': 'gone'
-        }
+        status_map = {"creating": "available", "rebooting": "available", "modifying": "available", "deleting": "gone"}
         if self.status == awaited_status:
             # No need to wait, we're already done
             return
 
         if status_map[self.status] != awaited_status:
-            msg = "Invalid awaited status. '%s' cannot transition to '%s'"
-            self.module.fail_json(msg=msg % (self.status, awaited_status))
+            self.module.fail_json(
+                msg=f"Invalid awaited status. '{self.status}' cannot transition to '{awaited_status}'"
+            )
 
         if awaited_status not in set(status_map.values()):
-            msg = "'%s' is not a valid awaited status."
-            self.module.fail_json(msg=msg % awaited_status)
+            self.module.fail_json(msg=f"'{awaited_status}' is not a valid awaited status.")
 
         while True:
             sleep(1)
@@ -381,27 +381,24 @@ class ElastiCacheManager(object):
     def _requires_modification(self):
         """Check if cluster requires (nondestructive) modification"""
         # Check modifiable data attributes
-        modifiable_data = {
-            'NumCacheNodes': self.num_nodes,
-            'EngineVersion': self.cache_engine_version
-        }
+        modifiable_data = {"NumCacheNodes": self.num_nodes, "EngineVersion": self.cache_engine_version}
         for key, value in modifiable_data.items():
             if value is not None and value and self.data[key] != value:
                 return True
 
         # Check cache security groups
         cache_security_groups = []
-        for sg in self.data['CacheSecurityGroups']:
-            cache_security_groups.append(sg['CacheSecurityGroupName'])
+        for sg in self.data["CacheSecurityGroups"]:
+            cache_security_groups.append(sg["CacheSecurityGroupName"])
         if set(cache_security_groups) != set(self.cache_security_groups):
             return True
 
         # check vpc security groups
         if self.security_group_ids:
             vpc_security_groups = []
-            security_groups = self.data.get('SecurityGroups', [])
+            security_groups = self.data.get("SecurityGroups", [])
             for sg in security_groups:
-                vpc_security_groups.append(sg['SecurityGroupId'])
+                vpc_security_groups.append(sg["SecurityGroupId"])
             if set(vpc_security_groups) != set(self.security_group_ids):
                 return True
 
@@ -412,13 +409,13 @@ class ElastiCacheManager(object):
         Check whether a destroy and create is required to synchronize cluster.
         """
         unmodifiable_data = {
-            'node_type': self.data['CacheNodeType'],
-            'engine': self.data['Engine'],
-            'cache_port': self._get_port()
+            "node_type": self.data["CacheNodeType"],
+            "engine": self.data["Engine"],
+            "cache_port": self._get_port(),
         }
         # Only check for modifications if zone is specified
         if self.zone is not None:
-            unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
+            unmodifiable_data["zone"] = self.data["PreferredAvailabilityZone"]
         for key, value in unmodifiable_data.items():
             if getattr(self, key) is not None and getattr(self, key) != value:
                 return True
@@ -427,18 +424,18 @@ class ElastiCacheManager(object):
     def _get_elasticache_connection(self):
         """Get an elasticache connection"""
         try:
-            return self.module.client('elasticache')
+            return self.module.client("elasticache")
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            self.module.fail_json_aws(e, msg='Failed to connect to AWS')
+            self.module.fail_json_aws(e, msg="Failed to connect to AWS")
 
     def _get_port(self):
         """Get the port. Where this information is retrieved from is engine dependent."""
-        if self.data['Engine'] == 'memcached':
-            return self.data['ConfigurationEndpoint']['Port']
-        elif self.data['Engine'] == 'redis':
+        if self.data["Engine"] == "memcached":
+            return self.data["ConfigurationEndpoint"]["Port"]
+        elif self.data["Engine"] == "redis":
             # Redis only supports a single node (presently) so just use
             # the first and only
-            return self.data['CacheNodes'][0]['Endpoint']['Port']
+            return self.data["CacheNodes"][0]["Endpoint"]["Port"]
 
     def _refresh_data(self, cache_cluster_data=None):
         """Refresh data about this cache cluster"""
@@ -446,104 +443,110 @@ class ElastiCacheManager(object):
         if cache_cluster_data is None:
             try:
                 response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True)
-            except is_boto3_error_code('CacheClusterNotFound'):
+            except is_boto3_error_code("CacheClusterNotFound"):
                 self.data = None
-                self.status = 'gone'
+                self.status = "gone"
                 return
             except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
                 self.module.fail_json_aws(e, msg="Failed to describe cache clusters")
-            cache_cluster_data = response['CacheClusters'][0]
+            cache_cluster_data = response["CacheClusters"][0]
         self.data = cache_cluster_data
-        self.status = self.data['CacheClusterStatus']
+        self.status = self.data["CacheClusterStatus"]
 
         # The documentation for elasticache lies -- status on rebooting is set
         # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
         # here to make status checks etc. more sane.
-        if self.status == 'rebooting cache cluster nodes':
-            self.status = 'rebooting'
+        if self.status == "rebooting cache cluster nodes":
+            self.status = "rebooting"
 
     def _get_nodes_to_remove(self):
         """If there are nodes to remove, it figures out which need to be removed"""
-        num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
+        num_nodes_to_remove = self.data["NumCacheNodes"] - self.num_nodes
        if num_nodes_to_remove <= 0:
             return []
 
         if not self.hard_modify:
-            msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
-            self.module.fail_json(msg=msg % self.name)
+            self.module.fail_json(
+                msg=f"'{self.name}' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
+            )
 
-        cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
+        cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]]
         return cache_node_ids[-num_nodes_to_remove:]
 
 
 def main():
-    """ elasticache ansible module """
+    """elasticache ansible module"""
     argument_spec = dict(
-        state=dict(required=True, choices=['present', 'absent', 'rebooted']),
+        state=dict(required=True, choices=["present", "absent", "rebooted"]),
         name=dict(required=True),
-        engine=dict(default='memcached'),
+        engine=dict(default="memcached"),
         cache_engine_version=dict(default=""),
-        node_type=dict(default='cache.t2.small'),
-        num_nodes=dict(default=1, type='int'),
+        node_type=dict(default="cache.t2.small"),
+        num_nodes=dict(default=1, type="int"),
         # alias for compat with the original PR 1950
-        cache_parameter_group=dict(default="", aliases=['parameter_group']),
-        cache_port=dict(type='int'),
+        cache_parameter_group=dict(default="", aliases=["parameter_group"]),
+        cache_port=dict(type="int"),
         cache_subnet_group=dict(default=""),
-        cache_security_groups=dict(default=[], type='list', elements='str'),
-        security_group_ids=dict(default=[], type='list', elements='str'),
+        cache_security_groups=dict(default=[], type="list", elements="str"),
+        security_group_ids=dict(default=[], type="list", elements="str"),
         zone=dict(),
-        wait=dict(default=True, type='bool'),
-        hard_modify=dict(type='bool'),
+        wait=dict(default=True, type="bool"),
+        hard_modify=dict(type="bool"),
     )
 
     module = AnsibleAWSModule(
         argument_spec=argument_spec,
     )
 
-    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
-
-    name = module.params['name']
-    state = module.params['state']
-    engine = module.params['engine']
-    cache_engine_version = module.params['cache_engine_version']
-    node_type = module.params['node_type']
-    num_nodes = module.params['num_nodes']
-    cache_port = module.params['cache_port']
-    cache_subnet_group = module.params['cache_subnet_group']
-    cache_security_groups = module.params['cache_security_groups']
-    security_group_ids = module.params['security_group_ids']
-    zone = module.params['zone']
-    wait = module.params['wait']
-    hard_modify = module.params['hard_modify']
-    cache_parameter_group = module.params['cache_parameter_group']
+    name = module.params["name"]
+    state = module.params["state"]
+    engine = module.params["engine"]
+    cache_engine_version = module.params["cache_engine_version"]
+    node_type = module.params["node_type"]
+    num_nodes = module.params["num_nodes"]
+    cache_port = module.params["cache_port"]
+    cache_subnet_group = module.params["cache_subnet_group"]
+    cache_security_groups = module.params["cache_security_groups"]
+    security_group_ids = module.params["security_group_ids"]
+    zone = module.params["zone"]
+    wait = module.params["wait"]
+    hard_modify = module.params["hard_modify"]
+    cache_parameter_group = module.params["cache_parameter_group"]
 
     if cache_subnet_group and cache_security_groups:
         module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")
 
-    if state == 'present' and not num_nodes:
+    if state == "present" and not num_nodes:
         module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
 
-    elasticache_manager = ElastiCacheManager(module, name, engine,
-                                             cache_engine_version, node_type,
-                                             num_nodes, cache_port,
-                                             cache_parameter_group,
-                                             cache_subnet_group,
-                                             cache_security_groups,
-                                             security_group_ids, zone, wait,
-                                             hard_modify, region, **aws_connect_kwargs)
+    elasticache_manager = ElastiCacheManager(
+        module,
+        name,
+        engine,
+        cache_engine_version,
+        node_type,
+        num_nodes,
+        cache_port,
+        cache_parameter_group,
+        cache_subnet_group,
+        cache_security_groups,
+        security_group_ids,
+        zone,
+        wait,
+        hard_modify,
+    )
 
-    if state == 'present':
+    if state == "present":
         elasticache_manager.ensure_present()
-    elif state == 'absent':
+    elif state == "absent":
         elasticache_manager.ensure_absent()
-    elif state == 'rebooted':
+    elif state == "rebooted":
         elasticache_manager.ensure_rebooted()
 
-    facts_result = dict(changed=elasticache_manager.changed,
-                        elasticache=elasticache_manager.get_info())
+    facts_result = dict(changed=elasticache_manager.changed, elasticache=elasticache_manager.get_info())
 
     module.exit_json(**facts_result)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
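The upstream changes above touch import paths, message formatting, code style, and documentation fragments, but leave the module's playbook interface unchanged. As a quick smoke test of the merged 9.4.0 code, a task along the following lines should behave the same as with the previous version. This is a minimal sketch only: the cluster name and availability zone are placeholders, and AWS credentials are assumed to come from environment variables or a profile, as the EXAMPLES block notes.

```yaml
# Hypothetical smoke-test task; all values are illustrative and only
# documented options of community.aws.elasticache are used.
- name: Ensure a small memcached cluster exists
  community.aws.elasticache:
    name: "upgrade-smoke-test"   # placeholder cluster id
    state: present
    engine: memcached
    node_type: cache.t2.small    # the module's default node type
    num_nodes: 1
    cache_port: 11211
    zone: us-east-1a             # placeholder availability zone
    wait: true
```

Running the same task twice should leave the cluster untouched on the second run, since `present` and `absent` remain idempotent in the refactored module.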