author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:35 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:35 +0000
commit     7fec0b69a082aaeec72fee0612766aa42f6b1b4d (patch)
tree       efb569b86ca4da888717f5433e757145fa322e08 /ansible_collections/dellemc/powerflex/plugins
parent     Releasing progress-linux version 7.7.0+dfsg-3~progress7.99u1. (diff)
Merging upstream version 9.4.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/dellemc/powerflex/plugins')
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py | 10
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/__init__.py | 0
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/configuration.py | 121
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/powerflex_base.py | 45
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py | 33
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/device.py | 85
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/fault_set.py | 380
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/info.py | 818
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py | 42
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py | 8
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py | 252
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/replication_pair.py | 21
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/sdc.py | 114
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/sds.py | 708
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py | 22
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/snapshot_policy.py | 828
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py | 5
-rw-r--r--  ansible_collections/dellemc/powerflex/plugins/modules/volume.py | 18
18 files changed, 2985 insertions, 525 deletions
diff --git a/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py b/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py
index 349680345..0c0e0d9e1 100644
--- a/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py
+++ b/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2020, Dell Technologies.
+# Copyright: (c) 2024, Dell Technologies.
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
from __future__ import absolute_import, division, print_function
@@ -48,12 +48,12 @@ class ModuleDocFragment(object):
- Time after which connection will get terminated.
- It is to be mentioned in seconds.
type: int
- required: False
+ required: false
default: 120
requirements:
- - A Dell PowerFlex storage system version 3.5 or later.
- - Ansible-core 2.12 or later.
- - PyPowerFlex 1.6.0.
+ - A Dell PowerFlex storage system version 3.6 or later.
+ - Ansible-core 2.14 or later.
+ - PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
notes:
- The modules present in the collection named as 'dellemc.powerflex'
diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/__init__.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/__init__.py
diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/configuration.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/configuration.py
new file mode 100644
index 000000000..b7ca3ec9a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/configuration.py
@@ -0,0 +1,121 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('configuration')
+
+
+class Configuration:
+
+ """
+ The configuration SDK class with shared configuration operations.
+ """
+
+ def __init__(self, powerflex_conn, module):
+ """
+ Initialize the configuration class
+ :param powerflex_conn: The PowerFlex connection SDK instance
+ :param module: Ansible module object
+ """
+ self.module = module
+ self.powerflex_conn = powerflex_conn
+
+ def get_protection_domain(
+ self, protection_domain_name=None, protection_domain_id=None
+ ):
+ """
+ Get protection domain details
+ :param protection_domain_name: Name of the protection domain
+ :param protection_domain_id: ID of the protection domain
+ :return: Protection domain details if exists
+ :rtype: dict
+ """
+
+ name_or_id = (
+ protection_domain_id if protection_domain_id else protection_domain_name
+ )
+
+ try:
+ if protection_domain_id:
+ pd_details = self.powerflex_conn.protection_domain.get(
+ filter_fields={"id": protection_domain_id}
+ )
+
+ else:
+ pd_details = self.powerflex_conn.protection_domain.get(
+ filter_fields={"name": protection_domain_name}
+ )
+
+ if len(pd_details) == 0:
+ error_msg = (
+ "Unable to find the protection domain with " "'%s'." % name_or_id
+ )
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ return pd_details[0]
+
+ except Exception as e:
+ error_msg = (
+ "Failed to get the protection domain '%s' with "
+ "error '%s'" % (name_or_id, str(e))
+ )
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_fault_set(self, fault_set_name=None, fault_set_id=None, protection_domain_id=None):
+ """Get fault set details
+ :param fault_set_name: Name of the fault set
+ :param fault_set_id: Id of the fault set
+ :param protection_domain_id: ID of the protection domain
+ :return: Fault set details
+ :rtype: dict
+ """
+ name_or_id = fault_set_id if fault_set_id \
+ else fault_set_name
+ try:
+ fs_details = {}
+ if fault_set_id:
+ fs_details = self.powerflex_conn.fault_set.get(
+ filter_fields={'id': name_or_id})
+
+ if fault_set_name:
+ fs_details = self.powerflex_conn.fault_set.get(
+ filter_fields={'name': name_or_id, 'protectionDomainId': protection_domain_id})
+
+ if not fs_details:
+ msg = f"Unable to find the fault set with {name_or_id}"
+ LOG.info(msg)
+ return None
+
+ return fs_details[0]
+
+ except Exception as e:
+ error_msg = f"Failed to get the fault set '{name_or_id}' with error '{str(e)}'"
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_associated_sds(self, fault_set_id=None):
+ """Get associated SDS to a fault set
+ :param fault_set_id: Id of the fault set
+ :return: Associated SDS details
+ :rtype: dict
+ """
+ try:
+ if fault_set_id:
+ sds_details = self.powerflex_conn.fault_set.get_sdss(
+ fault_set_id=fault_set_id)
+
+ return sds_details
+
+ except Exception as e:
+ error_msg = f"Failed to get the associated SDS with error '{str(e)}'"
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
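A minimal usage sketch of the new Configuration helper (illustrative only, not part of the commit; assumes a connected PowerFlexClient as `conn` and an AnsibleModule instance as `module`, the same objects the modules in this diff pass in):

    from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.configuration \
        import Configuration

    config = Configuration(conn, module)
    # Resolve a protection domain by name; fail_json() is called on errors or if it is missing.
    pd = config.get_protection_domain(protection_domain_name="domain1")
    # Look up a fault set inside that domain; returns None when it does not exist.
    fs = config.get_fault_set(fault_set_name="fs_001", protection_domain_id=pd["id"])
    # List the SDS associated with the fault set.
    sds_list = config.get_associated_sds(fault_set_id=fs["id"]) if fs else []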
diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/powerflex_base.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/powerflex_base.py
new file mode 100644
index 000000000..0cfb2659f
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/powerflex_base.py
@@ -0,0 +1,45 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('powerflex_base')
+
+
+class PowerFlexBase:
+
+ '''PowerFlex Base Class'''
+
+ def __init__(self, ansible_module, ansible_module_params):
+ """
+ Initialize the powerflex base class
+
+ :param ansible_module: Ansible module class
+ :type ansible_module: AnsibleModule
+ :param ansible_module_params: Parameters for ansible module class
+ :type ansible_module_params: dict
+ """
+ self.module_params = utils.get_powerflex_gateway_host_parameters()
+ ansible_module_params['argument_spec'].update(self.module_params)
+
+ # Initialize the ansible module
+ self.module = ansible_module(
+ **ansible_module_params
+ )
+
+ utils.ensure_required_libs(self.module)
+ self.result = {"changed": False}
+
+ try:
+ self.powerflex_conn = utils.get_powerflex_gateway_host_connection(
+ self.module.params)
+ LOG.info("Got the PowerFlex system connection object instance")
+ except Exception as e:
+ LOG.error(str(e))
+ self.module.fail_json(msg=str(e))
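A minimal sketch of how a module builds on the new PowerFlexBase class (illustrative only; PowerFlexExample is a hypothetical name, the pattern mirrors the fault_set module added later in this diff):

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.powerflex_base \
        import PowerFlexBase

    class PowerFlexExample(PowerFlexBase):
        def __init__(self):
            ansible_module_params = {
                'argument_spec': dict(state=dict(default='present', choices=['present', 'absent'])),
                'supports_check_mode': True
            }
            # The base class merges the gateway host parameters, checks required
            # libraries, and opens self.powerflex_conn (or calls fail_json on failure).
            super().__init__(AnsibleModule, ansible_module_params)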
diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py
index 8503aeb0c..94024d498 100644
--- a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py
+++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2021, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
from __future__ import absolute_import, division, print_function
@@ -17,14 +17,7 @@ from ansible.module_utils.basic import missing_required_lib
"""import PyPowerFlex lib"""
try:
from PyPowerFlex import PowerFlexClient
- from PyPowerFlex.objects.sds import Sds
- from PyPowerFlex.objects import protection_domain
- from PyPowerFlex.objects import storage_pool
- from PyPowerFlex.objects import sdc
- from PyPowerFlex.objects import volume
- from PyPowerFlex.objects import system
- from PyPowerFlex.objects.system import SnapshotDef
-
+ from PyPowerFlex.objects.system import SnapshotDef # pylint: disable=unused-import
HAS_POWERFLEX_SDK, POWERFLEX_SDK_IMP_ERR = True, None
except ImportError:
HAS_POWERFLEX_SDK, POWERFLEX_SDK_IMP_ERR = False, traceback.format_exc()
@@ -40,7 +33,7 @@ except ImportError:
"""importing dateutil"""
try:
- import dateutil.relativedelta
+ import dateutil.relativedelta # noqa # pylint: disable=unused-import
HAS_DATEUTIL, DATEUTIL_IMP_ERR = True, None
except ImportError:
HAS_DATEUTIL, DATEUTIL_IMP_ERR = False, traceback.format_exc()
@@ -87,10 +80,10 @@ def ensure_required_libs(module):
exception=PKG_RSRC_IMP_ERR)
if not HAS_POWERFLEX_SDK:
- module.fail_json(msg=missing_required_lib("PyPowerFlex V 1.6.0 or above"),
+ module.fail_json(msg=missing_required_lib("PyPowerFlex V 1.9.0 or above"),
exception=POWERFLEX_SDK_IMP_ERR)
- min_ver = '1.6.0'
+ min_ver = '1.9.0'
try:
curr_version = pkg_resources.require("PyPowerFlex")[0].version
supported_version = (parse_version(curr_version) >= parse_version(min_ver))
@@ -184,3 +177,19 @@ def is_invalid_name(name):
regexp = re.compile(r'^[a-zA-Z0-9!@#$%^~*_-]*$')
if not regexp.search(name):
return True
+
+
+def get_time_minutes(time, time_unit):
+ """Convert the given time to minutes"""
+
+ if time is not None and time > 0:
+ if time_unit in ('Hour'):
+ return time * 60
+ elif time_unit in ('Day'):
+ return time * 60 * 24
+ elif time_unit in ('Week'):
+ return time * 60 * 24 * 7
+ else:
+ return time
+ else:
+ return 0
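The new get_time_minutes() helper converts a time value plus unit into minutes. A quick illustration of its behaviour (not part of the commit):

    from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell import utils

    utils.get_time_minutes(2, 'Hour')     # 120   (hours * 60)
    utils.get_time_minutes(1, 'Day')      # 1440  (days * 60 * 24)
    utils.get_time_minutes(1, 'Week')     # 10080 (weeks * 60 * 24 * 7)
    utils.get_time_minutes(30, 'Minute')  # 30    (any other unit is returned unchanged)
    utils.get_time_minutes(None, 'Hour')  # 0     (None or non-positive values yield 0)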
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/device.py b/ansible_collections/dellemc/powerflex/plugins/modules/device.py
index a321315e3..e83353185 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/device.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/device.py
@@ -109,6 +109,14 @@ options:
choices: ['present', 'absent']
required: true
type: str
+ force:
+ description:
+ - Use the force flag to add a device.
+ - Use this flag to overwrite existing data on the device.
+ - Use this flag with caution, because all data on the device will be
+ destroyed.
+ type: bool
+ default: false
notes:
- The value for device_id is generated only after successful addition of the
device.
@@ -135,6 +143,22 @@ EXAMPLES = r'''
protection_domain_name: "domain1"
external_acceleration_type: "ReadAndWrite"
state: "present"
+- name: Add a device with force flag
+ dellemc.powerflex.device:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ current_pathname: "/dev/sdb"
+ sds_name: "node1"
+ media_type: "HDD"
+ device_name: "device2"
+ storage_pool_name: "pool1"
+ protection_domain_name: "domain1"
+ external_acceleration_type: "ReadAndWrite"
+ force: true
+ state: "present"
- name: Get device details using device_id
dellemc.powerflex.device:
hostname: "{{hostname}}"
@@ -166,23 +190,23 @@ EXAMPLES = r'''
state: "present"
- name: Remove a device using device_id
dellemc.powerflex.device:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- validate_certs: "{{validate_certs}}"
- port: "{{port}}"
- device_id: "76eb7e2f00010000"
- state: "absent"
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ device_id: "76eb7e2f00010000"
+ state: "absent"
- name: Remove a device using (current_pathname, sds_id)
dellemc.powerflex.device:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- validate_certs: "{{validate_certs}}"
- port: "{{port}}"
- current_pathname: "/dev/sdb"
- sds_name: "node1"
- state: "absent"
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ current_pathname: "/dev/sdb"
+ sds_name: "node1"
+ state: "absent"
'''
RETURN = r'''
@@ -715,12 +739,11 @@ class PowerFlexDevice(object):
self.powerflex_conn.device.create(
current_pathname=current_pathname,
- sds_id=sds_id,
- acceleration_pool_id=acceleration_pool_id,
+ sds_id=sds_id, acceleration_pool_id=acceleration_pool_id,
external_acceleration_type=external_acceleration_type,
- media_type=media_type,
- name=device_name,
- storage_pool_id=storage_pool_id)
+ media_type=media_type, name=device_name,
+ storage_pool_id=storage_pool_id,
+ force=self.module.params['force'])
return True
except Exception as e:
error_msg = "Adding device %s operation failed with " \
@@ -1076,21 +1099,15 @@ def get_powerflex_device_parameters():
"""This method provide parameter required for the device module on
PowerFlex"""
return dict(
- current_pathname=dict(),
- device_name=dict(),
- device_id=dict(),
- sds_name=dict(),
- sds_id=dict(),
- storage_pool_name=dict(),
- storage_pool_id=dict(),
- acceleration_pool_id=dict(),
- acceleration_pool_name=dict(),
- protection_domain_name=dict(),
- protection_domain_id=dict(),
- external_acceleration_type=dict(choices=['Invalid', 'None', 'Read',
- 'Write', 'ReadAndWrite']),
+ current_pathname=dict(), device_name=dict(), device_id=dict(),
+ sds_name=dict(), sds_id=dict(), storage_pool_name=dict(),
+ storage_pool_id=dict(), acceleration_pool_id=dict(),
+ acceleration_pool_name=dict(), protection_domain_name=dict(),
+ protection_domain_id=dict(), external_acceleration_type=dict(
+ choices=['Invalid', 'None', 'Read', 'Write', 'ReadAndWrite']),
media_type=dict(choices=['HDD', 'SSD', 'NVDIMM']),
- state=dict(required=True, type='str', choices=['present', 'absent'])
+ state=dict(required=True, type='str', choices=['present', 'absent']),
+ force=dict(type='bool', default=False)
)
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/fault_set.py b/ansible_collections/dellemc/powerflex/plugins/modules/fault_set.py
new file mode 100644
index 000000000..bfa926dd6
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/fault_set.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2024, Dell Technologies
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+""" Ansible module for managing Fault Sets on Dell Technologies (Dell) PowerFlex"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: fault_set
+version_added: '2.2.0'
+short_description: Manage Fault Sets on Dell PowerFlex
+description:
+- Managing fault sets on PowerFlex storage system includes creating,
+ getting details, renaming and deleting a fault set.
+author:
+- Carlos Tronco (@ctronco) <ansible.team@dell.com>
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
+extends_documentation_fragment:
+ - dellemc.powerflex.powerflex
+options:
+ fault_set_name:
+ description:
+ - Name of the Fault Set.
+ - Mutually exclusive with I(fault_set_id).
+ type: str
+ fault_set_id:
+ description:
+ - ID of the Fault Set.
+ - Mutually exclusive with I(fault_set_name).
+ type: str
+ protection_domain_name:
+ description:
+ - Name of protection domain.
+ - Mutually exclusive with I(protection_domain_id).
+ type: str
+ protection_domain_id:
+ description:
+ - ID of the protection domain.
+ - Mutually exclusive with I(protection_domain_name).
+ type: str
+ fault_set_new_name:
+ description:
+ - New name of the fault set.
+ type: str
+ state:
+ description:
+ - State of the Fault Set.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+notes:
+ - The I(check_mode) is supported.
+ - When I(fault_set_name) is provided, I(protection_domain_name)
+ or I(protection_domain_id) must be provided.
+'''
+
+
+EXAMPLES = r'''
+
+- name: Create Fault Set on Protection Domain
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_name: "{{ fault_set_name }}"
+ protection_domain_name: "{{ pd_name }}"
+ state: present
+
+- name: Rename Fault Set
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_name: "{{ fault_set_name }}"
+ fault_set_new_name: "{{ fault_set_new_name }}"
+ state: present
+
+- name: Get details of a Fault Set
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_id: "{{ fault_set_id }}"
+ state: present
+
+- name: Delete Fault Set
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_id: "{{ fault_set_id }}"
+ state: absent
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: 'false'
+
+fault_set_details:
+ description: Details of fault set.
+ returned: always
+ type: dict
+ contains:
+ protectionDomainId:
+ description: Unique identifier of the protection domain.
+ type: str
+ protectionDomainName:
+ description: Name of the protection domain.
+ type: str
+ name:
+ description: Name of the fault set.
+ type: str
+ id:
+ description: Unique identifier of the fault set.
+ type: str
+ SDS:
+ description: List of SDS associated with the fault set.
+ type: list
+ elements: dict
+ links:
+ description: Fault set links.
+ type: list
+ contains:
+ href:
+ description: Fault Set instance URL.
+ type: str
+ rel:
+ description: Relationship of fault set with different
+ entities.
+ type: str
+ sample: {
+ "protectionDomainId": "da721a8300000000",
+ "protectionDomainName": "sample-pd",
+ "name": "fs_001",
+ "id": "eb44b70500000000",
+ "links": []
+ }
+
+'''
+
+
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell import (
+ utils,
+)
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.powerflex_base \
+ import PowerFlexBase
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.configuration \
+ import Configuration
+from ansible.module_utils.basic import AnsibleModule
+
+
+LOG = utils.get_logger("fault_set")
+
+
+class PowerFlexFaultSet(PowerFlexBase):
+ """Class with FaultSet operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+
+ mutually_exclusive = [
+ ["fault_set_name", "fault_set_id"],
+ ["protection_domain_name", "protection_domain_id"],
+ ]
+ required_one_of = [["fault_set_name", "fault_set_id"]]
+
+ ansible_module_params = {
+ 'argument_spec': get_powerflex_fault_set_parameters(),
+ 'supports_check_mode': True,
+ 'mutually_exclusive': mutually_exclusive,
+ 'required_one_of': required_one_of
+ }
+ super().__init__(AnsibleModule, ansible_module_params)
+
+ self.result = dict(
+ changed=False,
+ fault_set_details={}
+ )
+
+ def get_protection_domain(
+ self, protection_domain_name=None, protection_domain_id=None
+ ):
+ """Get the details of a protection domain in a given PowerFlex storage
+ system"""
+ return Configuration(self.powerflex_conn, self.module).get_protection_domain(
+ protection_domain_name=protection_domain_name, protection_domain_id=protection_domain_id)
+
+ def get_associated_sds(
+ self, fault_set_id=None
+ ):
+ """Get the details of SDS associated to given fault set in a given PowerFlex storage
+ system"""
+ return Configuration(self.powerflex_conn, self.module).get_associated_sds(
+ fault_set_id=fault_set_id)
+
+ def create_fault_set(self, fault_set_name, protection_domain_id):
+ """
+ Create Fault Set
+ :param fault_set_name: Name of the fault set
+ :type fault_set_name: str
+ :param protection_domain_id: ID of the protection domain
+ :type protection_domain_id: str
+ :return: Fault set details
+ """
+ try:
+ if not self.module.check_mode:
+ msg = (f"Creating fault set with name: {fault_set_name} on "
+ f"protection domain with id: {protection_domain_id}")
+ LOG.info(msg)
+ self.powerflex_conn.fault_set.create(
+ name=fault_set_name, protection_domain_id=protection_domain_id
+ )
+ return self.get_fault_set(
+ fault_set_name=fault_set_name,
+ protection_domain_id=protection_domain_id)
+
+ except Exception as e:
+ error_msg = (f"Create fault set {fault_set_name} operation failed "
+ f"with error {str(e)}")
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_fault_set(self, fault_set_name=None, fault_set_id=None, protection_domain_id=None):
+ """Get fault set details
+ :param fault_set_name: Name of the fault set
+ :param fault_set_id: Id of the fault set
+ :param protection_domain_id: ID of the protection domain
+ :return: Fault set details
+ :rtype: dict
+ """
+ return Configuration(self.powerflex_conn, self.module).get_fault_set(
+ fault_set_name=fault_set_name, fault_set_id=fault_set_id, protection_domain_id=protection_domain_id)
+
+ def is_rename_required(self, fault_set_details, fault_set_params):
+ """To get the details of the fields to be modified."""
+
+ if fault_set_params['fault_set_new_name'] is not None and \
+ fault_set_params['fault_set_new_name'] != fault_set_details['name']:
+ return True
+
+ return False
+
+ def rename_fault_set(self, fault_set_id,
+ new_name):
+ """Perform rename operation on a fault set"""
+
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.fault_set.rename(
+ fault_set_id=fault_set_id,
+ name=new_name)
+ return self.get_fault_set(
+ fault_set_id=fault_set_id)
+ except Exception as e:
+ msg = (f'Failed to rename the fault set instance '
+ f'with error {str(e)}')
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def delete_fault_set(self, fault_set_id):
+ """Delete the Fault Set"""
+ try:
+ if not self.module.check_mode:
+ LOG.info(msg=f"Removing Fault Set {fault_set_id}")
+ self.powerflex_conn.fault_set.delete(fault_set_id)
+ LOG.info("returning None")
+ return None
+ return self.get_fault_set(
+ fault_set_id=fault_set_id)
+ except Exception as e:
+ errormsg = f"Removing Fault Set {fault_set_id} failed with error {str(e)}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_parameters(self, fault_set_params):
+ params = [fault_set_params['fault_set_name'], fault_set_params['fault_set_new_name']]
+ for param in params:
+ if param is not None and len(param.strip()) == 0:
+ error_msg = "Provide valid value for name for the " \
+ "creation/modification of the fault set."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+ if fault_set_params['fault_set_name'] is not None and \
+ fault_set_params['protection_domain_id'] is None and fault_set_params['protection_domain_name'] is None:
+ error_msg = "Provide protection_domain_id/protection_domain_name with fault_set_name."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+
+def get_powerflex_fault_set_parameters():
+ """This method provide parameter required for the Ansible Fault Set module on
+ PowerFlex"""
+ return dict(
+ fault_set_name=dict(),
+ fault_set_id=dict(),
+ protection_domain_name=dict(),
+ protection_domain_id=dict(),
+ fault_set_new_name=dict(),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+
+class FaultSetExitHandler():
+ def handle(self, fault_set_obj, fault_set_details):
+ fault_set_obj.result["fault_set_details"] = fault_set_details
+ if fault_set_details:
+ fault_set_obj.result["fault_set_details"]["protectionDomainName"] = \
+ fault_set_obj.get_protection_domain(
+ protection_domain_id=fault_set_details["protectionDomainId"])["name"]
+ fault_set_obj.result["fault_set_details"]["SDS"] = \
+ fault_set_obj.get_associated_sds(
+ fault_set_id=fault_set_details['id'])
+ fault_set_obj.module.exit_json(**fault_set_obj.result)
+
+
+class FaultSetDeleteHandler():
+ def handle(self, fault_set_obj, fault_set_params, fault_set_details):
+ if fault_set_params['state'] == 'absent' and fault_set_details:
+ fault_set_details = fault_set_obj.delete_fault_set(fault_set_details['id'])
+ fault_set_obj.result['changed'] = True
+
+ FaultSetExitHandler().handle(fault_set_obj, fault_set_details)
+
+
+class FaultSetRenameHandler():
+ def handle(self, fault_set_obj, fault_set_params, fault_set_details):
+ if fault_set_params['state'] == 'present' and fault_set_details:
+ is_rename_required = fault_set_obj.is_rename_required(fault_set_details, fault_set_params)
+ if is_rename_required:
+ fault_set_details = fault_set_obj.rename_fault_set(fault_set_id=fault_set_details['id'],
+ new_name=fault_set_params['fault_set_new_name'])
+ fault_set_obj.result['changed'] = True
+
+ FaultSetDeleteHandler().handle(fault_set_obj, fault_set_params, fault_set_details)
+
+
+class FaultSetCreateHandler():
+ def handle(self, fault_set_obj, fault_set_params, fault_set_details, pd_id):
+ if fault_set_params['state'] == 'present' and not fault_set_details:
+ fault_set_details = fault_set_obj.create_fault_set(fault_set_name=fault_set_params['fault_set_name'],
+ protection_domain_id=pd_id)
+ fault_set_obj.result['changed'] = True
+
+ FaultSetRenameHandler().handle(fault_set_obj, fault_set_params, fault_set_details)
+
+
+class FaultSetHandler():
+ def handle(self, fault_set_obj, fault_set_params):
+ fault_set_obj.validate_parameters(fault_set_params=fault_set_params)
+ pd_id = None
+ if fault_set_params['protection_domain_id'] or fault_set_params['protection_domain_name']:
+ pd_id = fault_set_obj.get_protection_domain(
+ protection_domain_id=fault_set_params['protection_domain_id'],
+ protection_domain_name=fault_set_params['protection_domain_name'])['id']
+ fault_set_details = fault_set_obj.get_fault_set(fault_set_id=fault_set_params['fault_set_id'],
+ fault_set_name=fault_set_params['fault_set_name'],
+ protection_domain_id=pd_id)
+ FaultSetCreateHandler().handle(fault_set_obj, fault_set_params, fault_set_details, pd_id)
+
+
+def main():
+ """ Create PowerFlex fault set object and perform action on it
+ based on user input from playbook."""
+ obj = PowerFlexFaultSet()
+ FaultSetHandler().handle(obj, obj.module.params)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/info.py b/ansible_collections/dellemc/powerflex/plugins/modules/info.py
index ff1401d63..33f3a8ad8 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/info.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/info.py
@@ -1,6 +1,6 @@
-#!/usr/bin/python
+# !/usr/bin/python
-# Copyright: (c) 2021, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
"""Ansible module for Gathering information about Dell Technologies (Dell) PowerFlex"""
@@ -21,12 +21,16 @@ description:
- Gathering information about Dell PowerFlex storage system includes
getting the api details, list of volumes, SDSs, SDCs, storage pools,
protection domains, snapshot policies, and devices.
+- Gathering information about Dell PowerFlex Manager includes getting the
+ list of managed devices, deployments and service templates.
extends_documentation_fragment:
- dellemc.powerflex.powerflex
author:
- Arindam Datta (@dattaarindam) <ansible.team@dell.com>
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
+- Jennifer John (@Jennifer-John) <ansible.team@dell.com>
options:
gather_subset:
@@ -42,8 +46,13 @@ options:
- Devices - C(device).
- Replication consistency groups - C(rcg).
- Replication pairs - C(replication_pair).
+ - Fault Sets - C(fault_set).
+ - Service templates - C(service_template).
+ - Managed devices - C(managed_device).
+ - Deployments - C(deployment).
choices: [vol, storage_pool, protection_domain, sdc, sds,
- snapshot_policy, device, rcg, replication_pair]
+ snapshot_policy, device, rcg, replication_pair,
+ fault_set, service_template, managed_device, deployment]
type: list
elements: str
filters:
@@ -62,16 +71,63 @@ options:
filter_operator:
description:
- Operation to be performed on filter key.
+ - Choice I('contains') is supported for gather_subset keys I(service_template), I(managed_device), I(deployment).
type: str
- choices: [equal]
+ choices: [equal, contains]
required: true
filter_value:
description:
- Value of the filter key.
type: str
required: true
+ limit:
+ description:
+ - Page limit.
+ - Supported for gather_subset keys I(service_template), I(managed_device), I(deployment).
+ type: int
+ default: 50
+ offset:
+ description:
+ - Pagination offset.
+ - Supported for gather_subset keys I(service_template), I(managed_device), I(deployment).
+ type: int
+ default: 0
+ sort:
+ description:
+ - Sort the returned components based on the specified field.
+ - Supported for gather_subset keys I(service_template), I(managed_device), I(deployment).
+ - The supported sort keys for each gather_subset are listed in the PowerFlex Manager API documentation at developer.dell.com.
+ type: str
+ include_devices:
+ description:
+ - Include devices in response.
+ - Applicable when gather_subset is I(deployment).
+ type: bool
+ default: true
+ include_template:
+ description:
+ - Include service templates in response.
+ - Applicable when gather_subset is I(deployment).
+ type: bool
+ default: true
+ full:
+ description:
+ - Specify whether the response is full or brief.
+ - Applicable when gather_subset is I(deployment), I(service_template).
+ - For I(deployment), specify whether to return full templates, including resources, in the response.
+ type: bool
+ default: false
+ include_attachments:
+ description:
+ - Include attachments.
+ - Applicable when gather_subset is I(service_template).
+ type: bool
+ default: true
notes:
- The I(check_mode) is supported.
+ - The supported filter keys for each gather_subset are listed in the PowerFlex Manager API documentation at developer.dell.com.
+ - The I(filter), I(sort), I(limit) and I(offset) options will be ignored when more than one I(gather_subset) is specified along with
+ I(service_template), I(managed_device) or I(deployment).
'''
EXAMPLES = r'''
@@ -91,6 +147,7 @@ EXAMPLES = r'''
- device
- rcg
- replication_pair
+ - fault_set
- name: Get a subset list of PowerFlex volumes
dellemc.powerflex.info:
@@ -104,6 +161,35 @@ EXAMPLES = r'''
- filter_key: "name"
filter_operator: "equal"
filter_value: "ansible_test"
+
+- name: Get deployment and resource provisioning info
+ dellemc.powerflex.info:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - managed_device
+ - deployment
+ - service_template
+
+- name: Get deployment with filter, sort, pagination
+ dellemc.powerflex.info:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - deployment
+ filters:
+ - filter_key: "name"
+ filter_operator: "contains"
+ filter_value: "partial"
+ sort: name
+ limit: 10
+ offset: 10
+ include_devices: true
+ include_template: true
'''
RETURN = r'''
@@ -1147,19 +1233,557 @@ Replication_pairs:
"replicationConsistencyGroupId": "e2ce036b00000002",
"userRequestedPauseTransmitInitCopy": false
}
+Fault_Sets:
+ description: Details of fault sets.
+ returned: always
+ type: list
+ contains:
+ protectionDomainId:
+ description: The ID of the protection domain.
+ type: str
+ name:
+ description: Name of the fault set.
+ type: str
+ id:
+ description: Unique identifier of the fault set.
+ type: str
+ sample: [
+ {
+ "protectionDomainId": "da721a8300000000",
+ "protectionDomainName": "fault_set_1",
+ "name": "at1zbs1t6cp2sds1d1fs1",
+ "SDS": [],
+ "id": "eb44b70500000000",
+ "links": [
+ { "rel": "self", "href": "/api/instances/FaultSet::eb44b70500000000" },
+ {
+ "rel": "/api/FaultSet/relationship/Statistics",
+ "href": "/api/instances/FaultSet::eb44b70500000000/relationships/Statistics"
+ },
+ {
+ "rel": "/api/FaultSet/relationship/Sds",
+ "href": "/api/instances/FaultSet::eb44b70500000000/relationships/Sds"
+ },
+ {
+ "rel": "/api/parent/relationship/protectionDomainId",
+ "href": "/api/instances/ProtectionDomain::da721a8300000000"
+ }
+ ]
+ },
+ {
+ "protectionDomainId": "da721a8300000000",
+ "protectionDomainName": "fault_set_2",
+ "name": "at1zbs1t6cp2sds1d1fs3",
+ "SDS": [],
+ "id": "eb44b70700000002",
+ "links": [
+ { "rel": "self", "href": "/api/instances/FaultSet::eb44b70700000002" },
+ {
+ "rel": "/api/FaultSet/relationship/Statistics",
+ "href": "/api/instances/FaultSet::eb44b70700000002/relationships/Statistics"
+ },
+ {
+ "rel": "/api/FaultSet/relationship/Sds",
+ "href": "/api/instances/FaultSet::eb44b70700000002/relationships/Sds"
+ },
+ {
+ "rel": "/api/parent/relationship/protectionDomainId",
+ "href": "/api/instances/ProtectionDomain::da721a8300000000"
+ }
+ ]
+ }
+ ]
+ManagedDevices:
+ description: Details of all devices from inventory.
+ returned: when I(gather_subset) is I(managed_device)
+ type: list
+ contains:
+ deviceType:
+ description: Device Type.
+ type: str
+ serviceTag:
+ description: Service Tag.
+ type: str
+ serverTemplateId:
+ description: The ID of the server template.
+ type: str
+ state:
+ description: The state of the device.
+ type: str
+ managedState:
+ description: The managed state of the device.
+ type: str
+ compliance:
+ description: The compliance state of the device.
+ type: str
+ systemId:
+ description: The system ID.
+ type: str
+ sample: [{
+ "refId": "softwareOnlyServer-10.1.1.1",
+ "refType": null,
+ "ipAddress": "10.1.1.1",
+ "currentIpAddress": "10.1.1.1",
+ "serviceTag": "VMware-42 15 a5 f9 65 e6 63 0e-36 79 59 73 7b 3a 68 cd-SW",
+ "model": "VMware Virtual Platform",
+ "deviceType": "SoftwareOnlyServer",
+ "discoverDeviceType": "SOFTWAREONLYSERVER_CENTOS",
+ "displayName": "vpi1011-c1n1",
+ "managedState": "UNMANAGED",
+ "state": "READY",
+ "inUse": false,
+ "serviceReferences": [],
+ "statusMessage": null,
+ "firmwareName": "Default Catalog - PowerFlex 4.5.0.0",
+ "customFirmware": false,
+ "needsAttention": false,
+ "manufacturer": "VMware, Inc.",
+ "systemId": null,
+ "health": "RED",
+ "healthMessage": "Inventory run failed.",
+ "operatingSystem": "N/A",
+ "numberOfCPUs": 0,
+ "cpuType": null,
+ "nics": 0,
+ "memoryInGB": 0,
+ "infraTemplateDate": null,
+ "infraTemplateId": null,
+ "serverTemplateDate": null,
+ "serverTemplateId": null,
+ "inventoryDate": null,
+ "complianceCheckDate": "2024-02-05T18:31:31.213+00:00",
+ "discoveredDate": "2024-02-05T18:31:30.992+00:00",
+ "deviceGroupList": {
+ "paging": null,
+ "deviceGroup": [
+ {
+ "link": null,
+ "groupSeqId": -1,
+ "groupName": "Global",
+ "groupDescription": null,
+ "createdDate": null,
+ "createdBy": "admin",
+ "updatedDate": null,
+ "updatedBy": null,
+ "managedDeviceList": null,
+ "groupUserList": null
+ }
+ ]
+ },
+ "detailLink": {
+ "title": "softwareOnlyServer-10.1.1.1",
+ "href": "/AsmManager/ManagedDevice/softwareOnlyServer-10.1.1.1",
+ "rel": "describedby",
+ "type": null
+ },
+ "credId": "bc97cefb-5eb4-4c20-8e39-d1a2b809c9f5",
+ "compliance": "NONCOMPLIANT",
+ "failuresCount": 0,
+ "chassisId": null,
+ "parsedFacts": null,
+ "config": null,
+ "hostname": "vpi1011-c1n1",
+ "osIpAddress": null,
+ "osAdminCredential": null,
+ "osImageType": null,
+ "lastJobs": null,
+ "puppetCertName": "red_hat-10.1.1.1",
+ "svmAdminCredential": null,
+ "svmName": null,
+ "svmIpAddress": null,
+ "svmImageType": null,
+ "flexosMaintMode": 0,
+ "esxiMaintMode": 0,
+ "vmList": []
+ }]
+Deployments:
+ description: Details of all deployments.
+ returned: when I(gather_subset) is I(deployment)
+ type: list
+ contains:
+ id:
+ description: Deployment ID.
+ type: str
+ deploymentName:
+ description: Deployment name.
+ type: str
+ status:
+ description: The status of deployment.
+ type: str
+ firmwareRepository:
+ description: The firmware repository.
+ type: dict
+ contains:
+ signature:
+ description: The signature details.
+ type: str
+ downloadStatus:
+ description: The download status.
+ type: str
+ rcmapproved:
+ description: If RCM approved.
+ type: bool
+ sample: [{
+ "id": "8aaa80658cd602e0018cda8b257f78ce",
+ "deploymentName": "Test-Update - K",
+ "deploymentDescription": "Test-Update - K",
+ "deploymentValid": null,
+ "retry": false,
+ "teardown": false,
+ "teardownAfterCancel": false,
+ "removeService": false,
+ "createdDate": "2024-01-05T16:53:21.407+00:00",
+ "createdBy": "admin",
+ "updatedDate": "2024-02-11T17:00:05.657+00:00",
+ "updatedBy": "system",
+ "deploymentScheduledDate": null,
+ "deploymentStartedDate": "2024-01-05T16:53:22.886+00:00",
+ "deploymentFinishedDate": null,
+ "serviceTemplate": {
+ "id": "8aaa80658cd602e0018cda8b257f78ce",
+ "templateName": "block-only (8aaa80658cd602e0018cda8b257f78ce)",
+ "templateDescription": "Storage - Software Only deployment",
+ "templateType": "VxRack FLEX",
+ "templateVersion": "4.5.0.0",
+ "templateValid": {
+ "valid": true,
+ "messages": []
+ },
+ "originalTemplateId": "c44cb500-020f-4562-9456-42ec1eb5f9b2",
+ "templateLocked": false,
+ "draft": false,
+ "inConfiguration": false,
+ "createdDate": "2024-01-05T16:53:22.083+00:00",
+ "createdBy": null,
+ "updatedDate": "2024-02-09T06:00:09.602+00:00",
+ "lastDeployedDate": null,
+ "updatedBy": null,
+ "components": [
+ {
+ "id": "6def7edd-bae2-4420-93bf-9ceb051bbb65",
+ "componentID": "component-scaleio-gateway-1",
+ "identifier": null,
+ "componentValid": {
+ "valid": true,
+ "messages": []
+ },
+ "puppetCertName": "scaleio-block-legacy-gateway",
+ "osPuppetCertName": null,
+ "name": "block-legacy-gateway",
+ "type": "SCALEIO",
+ "subType": "STORAGEONLY",
+ "teardown": false,
+ "helpText": null,
+ "managementIpAddress": null,
+ "configFile": null,
+ "serialNumber": null,
+ "asmGUID": "scaleio-block-legacy-gateway",
+ "relatedComponents": {
+ "625b0e17-9b91-4bc0-864c-d0111d42d8d0": "Node (Software Only)",
+ "961a59eb-80c3-4a3a-84b7-2101e9831527": "Node (Software Only)-2",
+ "bca710a5-7cdf-481e-b729-0b53e02873ee": "Node (Software Only)-3"
+ },
+ "resources": [],
+ "refId": null,
+ "cloned": false,
+ "clonedFromId": null,
+ "manageFirmware": false,
+ "brownfield": false,
+ "instances": 1,
+ "clonedFromAsmGuid": null,
+ "ip": null
+ }
+ ],
+ "category": "block-only",
+ "allUsersAllowed": true,
+ "assignedUsers": [],
+ "manageFirmware": true,
+ "useDefaultCatalog": false,
+ "firmwareRepository": null,
+ "licenseRepository": null,
+ "configuration": null,
+ "serverCount": 3,
+ "storageCount": 1,
+ "clusterCount": 1,
+ "serviceCount": 0,
+ "switchCount": 0,
+ "vmCount": 0,
+ "sdnasCount": 0,
+ "brownfieldTemplateType": "NONE",
+ "networks": [
+ {
+ "id": "8aaa80648cd5fb9b018cda46e4e50000",
+ "name": "mgmt",
+ "description": "",
+ "type": "SCALEIO_MANAGEMENT",
+ "vlanId": 850,
+ "static": true,
+ "staticNetworkConfiguration": {
+ "gateway": "10.1.1.1",
+ "subnet": "1.1.1.0",
+ "primaryDns": "10.1.1.1",
+ "secondaryDns": "10.1.1.1",
+ "dnsSuffix": null,
+ "ipRange": [
+ {
+ "id": "8aaa80648cd5fb9b018cda46e5080001",
+ "startingIp": "10.1.1.1",
+ "endingIp": "10.1.1.1",
+ "role": null
+ }
+ ],
+ "ipAddress": null,
+ "staticRoute": null
+ },
+ "destinationIpAddress": "10.1.1.1"
+ }
+ ],
+ "blockServiceOperationsMap": {
+ "scaleio-block-legacy-gateway": {
+ "blockServiceOperationsMap": {}
+ }
+ }
+ },
+ "scheduleDate": null,
+ "status": "complete",
+ "compliant": true,
+ "deploymentDevice": [
+ {
+ "refId": "scaleio-block-legacy-gateway",
+ "refType": null,
+ "logDump": null,
+ "status": null,
+ "statusEndTime": null,
+ "statusStartTime": null,
+ "deviceHealth": "GREEN",
+ "healthMessage": "OK",
+ "compliantState": "COMPLIANT",
+ "brownfieldStatus": "NOT_APPLICABLE",
+ "deviceType": "scaleio",
+ "deviceGroupName": null,
+ "ipAddress": "block-legacy-gateway",
+ "currentIpAddress": "10.1.1.1",
+ "serviceTag": "block-legacy-gateway",
+ "componentId": null,
+ "statusMessage": null,
+ "model": "PowerFlex Gateway",
+ "cloudLink": false,
+ "dasCache": false,
+ "deviceState": "READY",
+ "puppetCertName": "scaleio-block-legacy-gateway",
+ "brownfield": false
+ }
+ ],
+ "vms": null,
+ "updateServerFirmware": true,
+ "useDefaultCatalog": false,
+ "firmwareRepository": {
+ "id": "8aaa80658cd602e0018cd996a1c91bdc",
+ "name": "Intelligent Catalog 45.373.00",
+ "sourceLocation": null,
+ "sourceType": null,
+ "diskLocation": null,
+ "filename": null,
+ "md5Hash": null,
+ "username": null,
+ "password": null,
+ "downloadStatus": null,
+ "createdDate": null,
+ "createdBy": null,
+ "updatedDate": null,
+ "updatedBy": null,
+ "defaultCatalog": false,
+ "embedded": false,
+ "state": null,
+ "softwareComponents": [],
+ "softwareBundles": [],
+ "deployments": [],
+ "bundleCount": 0,
+ "componentCount": 0,
+ "userBundleCount": 0,
+ "minimal": false,
+ "downloadProgress": 0,
+ "extractProgress": 0,
+ "fileSizeInGigabytes": null,
+ "signedKeySourceLocation": null,
+ "signature": null,
+ "custom": false,
+ "needsAttention": false,
+ "jobId": null,
+ "rcmapproved": false
+ },
+ "firmwareRepositoryId": "8aaa80658cd602e0018cd996a1c91bdc",
+ "licenseRepository": null,
+ "licenseRepositoryId": null,
+ "individualTeardown": false,
+ "deploymentHealthStatusType": "green",
+ "assignedUsers": [],
+ "allUsersAllowed": true,
+ "owner": "admin",
+ "noOp": false,
+ "firmwareInit": false,
+ "disruptiveFirmware": false,
+ "preconfigureSVM": false,
+ "preconfigureSVMAndUpdate": false,
+ "servicesDeployed": "NONE",
+ "precalculatedDeviceHealth": null,
+ "lifecycleModeReasons": [],
+ "jobDetails": null,
+ "numberOfDeployments": 0,
+ "operationType": "NONE",
+ "operationStatus": null,
+ "operationData": null,
+ "deploymentValidationResponse": null,
+ "currentStepCount": null,
+ "totalNumOfSteps": null,
+ "currentStepMessage": null,
+ "customImage": "os_sles",
+ "originalDeploymentId": null,
+ "currentBatchCount": null,
+ "totalBatchCount": null,
+ "templateValid": true,
+ "lifecycleMode": false,
+ "vds": false,
+ "scaleUp": false,
+ "brownfield": false,
+ "configurationChange": false
+ }]
+ServiceTemplates:
+ description: Details of all service templates.
+ returned: when I(gather_subset) is I(service_template)
+ type: list
+ contains:
+ templateName:
+ description: Template name.
+ type: str
+ templateDescription:
+ description: Template description.
+ type: str
+ templateType:
+ description: Template type.
+ type: str
+ templateVersion:
+ description: Template version.
+ type: str
+ category:
+ description: The template category.
+ type: str
+ serverCount:
+ description: Server count.
+ type: int
+ sample: [{
+ "id": "2434144f-7795-4245-a04b-6fcb771697d7",
+ "templateName": "Storage- 100Gb",
+ "templateDescription": "Storage Only 4 Node deployment with 100Gb networking",
+ "templateType": "VxRack FLEX",
+ "templateVersion": "4.5-213",
+ "templateValid": {
+ "valid": true,
+ "messages": []
+ },
+ "originalTemplateId": "ff80808177f880fc0177f883bf1e0027",
+ "templateLocked": true,
+ "draft": false,
+ "inConfiguration": false,
+ "createdDate": "2024-01-04T19:47:23.534+00:00",
+ "createdBy": "system",
+ "updatedDate": null,
+ "lastDeployedDate": null,
+ "updatedBy": null,
+ "components": [
+ {
+ "id": "43dec024-85a9-4901-9e8e-fa0d3c417f7b",
+ "componentID": "component-scaleio-gateway-1",
+ "identifier": null,
+ "componentValid": {
+ "valid": true,
+ "messages": []
+ },
+ "puppetCertName": null,
+ "osPuppetCertName": null,
+ "name": "PowerFlex Cluster",
+ "type": "SCALEIO",
+ "subType": "STORAGEONLY",
+ "teardown": false,
+ "helpText": null,
+ "managementIpAddress": null,
+ "configFile": null,
+ "serialNumber": null,
+ "asmGUID": null,
+ "relatedComponents": {
+ "c5c46733-012c-4dca-af9b-af46d73d045a": "Storage Only Node"
+ },
+ "resources": [],
+ "refId": null,
+ "cloned": false,
+ "clonedFromId": null,
+ "manageFirmware": false,
+ "brownfield": false,
+ "instances": 1,
+ "clonedFromAsmGuid": null,
+ "ip": null
+ }
+ ],
+ "category": "Sample Templates",
+ "allUsersAllowed": false,
+ "assignedUsers": [],
+ "manageFirmware": true,
+ "useDefaultCatalog": true,
+ "firmwareRepository": null,
+ "licenseRepository": null,
+ "configuration": null,
+ "serverCount": 4,
+ "storageCount": 0,
+ "clusterCount": 1,
+ "serviceCount": 0,
+ "switchCount": 0,
+ "vmCount": 0,
+ "sdnasCount": 0,
+ "brownfieldTemplateType": "NONE",
+ "networks": [
+ {
+ "id": "ff80808177f8823b0177f8bb82d80005",
+ "name": "flex-data2",
+ "description": "",
+ "type": "SCALEIO_DATA",
+ "vlanId": 105,
+ "static": true,
+ "staticNetworkConfiguration": {
+ "gateway": null,
+ "subnet": "1.1.1.0",
+ "primaryDns": null,
+ "secondaryDns": null,
+ "dnsSuffix": null,
+ "ipRange": null,
+ "ipAddress": null,
+ "staticRoute": null
+ },
+ "destinationIpAddress": "1.1.1.0"
+ }
+ ],
+ "blockServiceOperationsMap": {}
+ }]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
import utils
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.configuration \
+ import Configuration
+import re
LOG = utils.get_logger('info')
+UNSUPPORTED_SUBSET_FOR_VERSION = 'One or more specified subset is not supported for the PowerFlex version.'
+POWERFLEX_MANAGER_GATHER_SUBSET = {'managed_device', 'deployment', 'service_template'}
+MIN_SUPPORTED_POWERFLEX_MANAGER_VERSION = 4.0
+ERROR_CODES = r'PARSE002|FILTER002|FILTER003'
+
class PowerFlexInfo(object):
"""Class with Info operations"""
- filter_mapping = {'equal': 'eq.'}
+ filter_mapping = {'equal': 'eq', 'contains': 'co'}
def __init__(self):
""" Define all parameters required by this module"""
@@ -1265,7 +1889,7 @@ class PowerFlexInfo(object):
return result_list(sds)
except Exception as e:
- msg = 'Get sds list from powerflex array failed with' \
+ msg = 'Get SDS list from powerflex array failed with' \
' error %s' % (str(e))
LOG.error(msg)
self.module.fail_json(msg=msg)
@@ -1395,19 +2019,24 @@ class PowerFlexInfo(object):
system """
try:
- LOG.info('Getting snapshot schedules list ')
+ LOG.info('Getting snapshot policies list ')
if filter_dict:
- snapshot_schedules = \
+ snapshot_policies = \
self.powerflex_conn.snapshot_policy.get(
filter_fields=filter_dict)
else:
- snapshot_schedules = \
+ snapshot_policies = \
self.powerflex_conn.snapshot_policy.get()
- return result_list(snapshot_schedules)
+ if snapshot_policies:
+ statistics_map = self.powerflex_conn.utility.get_statistics_for_all_snapshot_policies()
+ list_of_snap_pol_ids_in_statistics = statistics_map.keys()
+ for item in snapshot_policies:
+ item['statistics'] = statistics_map[item['id']] if item['id'] in list_of_snap_pol_ids_in_statistics else {}
+ return result_list(snapshot_policies)
except Exception as e:
- msg = 'Get snapshot schedules list from powerflex array failed ' \
+ msg = 'Get snapshot policies list from powerflex array failed ' \
'with error %s' % (str(e))
LOG.error(msg)
self.module.fail_json(msg=msg)
@@ -1431,6 +2060,114 @@ class PowerFlexInfo(object):
LOG.error(msg)
self.module.fail_json(msg=msg)
+ def get_fault_sets_list(self, filter_dict=None):
+ """ Get the list of fault sets on a given PowerFlex storage
+ system """
+
+ try:
+ LOG.info('Getting fault set list ')
+ filter_pd = []
+ if filter_dict:
+ if 'protectionDomainName' in filter_dict.keys():
+ filter_pd = filter_dict['protectionDomainName']
+ del filter_dict['protectionDomainName']
+ fault_sets = self.powerflex_conn.fault_set.get(filter_fields=filter_dict)
+ else:
+ fault_sets = self.powerflex_conn.fault_set.get()
+
+ fault_set_final = []
+ if fault_sets:
+ for fault_set in fault_sets:
+ fault_set['protectionDomainName'] = Configuration(self.powerflex_conn, self.module).get_protection_domain(
+ protection_domain_id=fault_set["protectionDomainId"])["name"]
+ fault_set["SDS"] = Configuration(self.powerflex_conn, self.module).get_associated_sds(
+ fault_set_id=fault_set['id'])
+ fault_set_final.append(fault_set)
+ fault_sets = []
+ for fault_set in fault_set_final:
+ if fault_set['protectionDomainName'] in filter_pd:
+ fault_sets.append(fault_set)
+ if len(filter_pd) != 0:
+ return result_list(fault_sets)
+ return result_list(fault_set_final)
+
+ except Exception as e:
+ msg = 'Get fault set list from powerflex array failed ' \
+ 'with error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_managed_devices_list(self):
+ """ Get the list of managed devices on a given PowerFlex Manager system """
+ try:
+ LOG.info('Getting managed devices list ')
+ devices = self.powerflex_conn.managed_device.get(filters=self.populate_filter_list(),
+ limit=self.get_param_value('limit'),
+ offset=self.get_param_value('offset'),
+ sort=self.get_param_value('sort'))
+ return devices
+ except Exception as e:
+ msg = f'Get managed devices from PowerFlex Manager failed with error {str(e)}'
+ return self.handle_error_exit(msg)
+
+ def get_deployments_list(self):
+ """ Get the list of deployments on a given PowerFlex Manager system """
+ try:
+ LOG.info('Getting deployments list ')
+ deployments = self.powerflex_conn.deployment.get(filters=self.populate_filter_list(),
+ sort=self.get_param_value('sort'),
+ limit=self.get_param_value('limit'),
+ offset=self.get_param_value('offset'),
+ include_devices=self.get_param_value('include_devices'),
+ include_template=self.get_param_value('include_template'),
+ full=self.get_param_value('full'))
+ return deployments
+ except Exception as e:
+ msg = f'Get deployments from PowerFlex Manager failed with error {str(e)}'
+ return self.handle_error_exit(msg)
+
+ def get_service_templates_list(self):
+ """ Get the list of service templates on a given PowerFlex Manager system """
+ try:
+ LOG.info('Getting service templates list ')
+ service_templates = self.powerflex_conn.service_template.get(filters=self.populate_filter_list(),
+ sort=self.get_param_value('sort'),
+ offset=self.get_param_value('offset'),
+ limit=self.get_param_value('limit'),
+ full=self.get_param_value('full'),
+ include_attachments=self.get_param_value('include_attachments'))
+ return service_templates
+ except Exception as e:
+ msg = f'Get service templates from PowerFlex Manager failed with error {str(e)}'
+ return self.handle_error_exit(msg)
+
+ def handle_error_exit(self, detailed_message):
+ match = re.search(r"displayMessage=([^']+)", detailed_message)
+ error_message = match.group(1) if match else detailed_message
+ LOG.error(error_message)
+ if re.search(ERROR_CODES, detailed_message):
+ return []
+ self.module.fail_json(msg=error_message)
+
+ def get_param_value(self, param):
+ """
+ Get the value of the given parameter.
+ Args:
+ param (str): The parameter to get the value for.
+ Returns:
+ The value of the parameter if it differs from the default value
+ (and, for integers, is greater than or equal to 0);
+ otherwise None.
+ """
+ if param in ('sort', 'offset', 'limit') and len(self.module.params.get('gather_subset')) > 1:
+ return None
+
+ default_value = self.module_params.get(param).get('default')
+ param_value = self.module.params.get(param)
+ if (default_value != param_value) and (param_value >= 0 if isinstance(param_value, int) else True):
+ return param_value
+ return None
+
def validate_filter(self, filter_dict):
""" Validate given filter_dict """
@@ -1447,6 +2184,16 @@ class PowerFlexInfo(object):
LOG.error(msg)
self.module.fail_json(msg=msg)
+ def populate_filter_list(self):
+ """Populate the filter list"""
+ if len(self.module.params.get('gather_subset')) > 1:
+ return []
+ filters = self.module.params.get('filters') or []
+ return [
+ f'{self.filter_mapping.get(filter_dict["filter_operator"])},{filter_dict["filter_key"]},{filter_dict["filter_value"]}'
+ for filter_dict in filters
+ ]
+
def get_filters(self, filters):
"""Get the filters to be applied"""
@@ -1454,7 +2201,7 @@ class PowerFlexInfo(object):
for item in filters:
self.validate_filter(item)
f_op = item['filter_operator']
- if self.filter_mapping.get(f_op):
+ if self.filter_mapping.get(f_op) == self.filter_mapping.get("equal"):
f_key = item['filter_key']
f_val = item['filter_value']
if f_key in filter_dict:
@@ -1468,15 +2215,12 @@ class PowerFlexInfo(object):
filter_dict[f_key] = [filter_dict[f_key], f_val]
else:
filter_dict[f_key] = f_val
- else:
- msg = "Given filter operator '{0}' is not supported." \
- "supported operators are : '{1}'".format(
- f_op,
- list(self.filter_mapping.keys()))
- LOG.error(msg)
- self.module.fail_json(msg=msg)
return filter_dict
+ def validate_subset(self, api_version, subset):
+ if float(api_version) < MIN_SUPPORTED_POWERFLEX_MANAGER_VERSION and subset and set(subset).issubset(POWERFLEX_MANAGER_GATHER_SUBSET):
+ self.module.exit_json(msg=UNSUPPORTED_SUBSET_FOR_VERSION, skipped=True)
+
def perform_module_operation(self):
""" Perform different actions on info based on user input
in the playbook """
@@ -1498,8 +2242,13 @@ class PowerFlexInfo(object):
device = []
rcgs = []
replication_pair = []
+ fault_sets = []
+ service_template = []
+ managed_device = []
+ deployment = []
subset = self.module.params['gather_subset']
+ self.validate_subset(api_version, subset)
if subset is not None:
if 'sdc' in subset:
sdc = self.get_sdc_list(filter_dict=filter_dict)
@@ -1519,6 +2268,14 @@ class PowerFlexInfo(object):
rcgs = self.get_replication_consistency_group_list(filter_dict=filter_dict)
if 'replication_pair' in subset:
replication_pair = self.get_replication_pair_list(filter_dict=filter_dict)
+ if 'fault_set' in subset:
+ fault_sets = self.get_fault_sets_list(filter_dict=filter_dict)
+ if 'managed_device' in subset:
+ managed_device = self.get_managed_devices_list()
+ if 'service_template' in subset:
+ service_template = self.get_service_templates_list()
+ if 'deployment' in subset:
+ deployment = self.get_deployments_list()
self.module.exit_json(
Array_Details=array_details,
@@ -1531,7 +2288,11 @@ class PowerFlexInfo(object):
Protection_Domains=protection_domain,
Devices=device,
Replication_Consistency_Groups=rcgs,
- Replication_Pairs=replication_pair
+ Replication_Pairs=replication_pair,
+ Fault_Sets=fault_sets,
+ ManagedDevices=managed_device,
+ ServiceTemplates=service_template,
+ Deployments=deployment
)
@@ -1556,15 +2317,24 @@ def get_powerflex_info_parameters():
return dict(
gather_subset=dict(type='list', required=False, elements='str',
choices=['vol', 'storage_pool',
- 'protection_domain', 'sdc', 'sds',
- 'snapshot_policy', 'device', 'rcg', 'replication_pair']),
+ 'protection_domain', 'sdc', 'sds', 'snapshot_policy',
+ 'device', 'rcg', 'replication_pair', 'fault_set',
+ 'service_template', 'managed_device', 'deployment']),
filters=dict(type='list', required=False, elements='dict',
options=dict(filter_key=dict(type='str', required=True, no_log=False),
filter_operator=dict(
type='str', required=True,
- choices=['equal']),
+ choices=['equal', 'contains']),
filter_value=dict(type='str', required=True)
- )))
+ )),
+ sort=dict(type='str'),
+ limit=dict(type='int', default=50),
+ offset=dict(type='int', default=0),
+ include_devices=dict(type='bool', default=True),
+ include_template=dict(type='bool', default=True),
+ full=dict(type='bool', default=False),
+ include_attachments=dict(type='bool', default=True)
+ )
def main():
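An illustrative task, not taken from the module's EXAMPLES, showing how the expanded gather_subset choices, the new 'contains' filter operator and the paging parameters added above might be combined. The filter key and value are hypothetical, and per populate_filter_list above the filters are only built when a single subset is requested; the PowerFlex Manager subsets are skipped on array versions below the minimum supported one.

- name: Get deployment details filtered by name
  dellemc.powerflex.info:
    hostname: "{{ hostname }}"
    username: "{{ username }}"
    password: "{{ password }}"
    validate_certs: "{{ validate_certs }}"
    port: "{{ port }}"
    gather_subset:
      - deployment
    filters:
      - filter_key: "name"
        filter_operator: "contains"
        filter_value: "rack"
    limit: 20
    offset: 0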
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py b/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py
index 084666bc3..90e0bcad0 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py
@@ -145,7 +145,7 @@ notes:
interfaces.
- Parameters I(mdm_name) or I(mdm_id) are not required while modifying performance
profile.
- - For change MDM cluster ownership operation, only changed as True will be
+ - For change MDM cluster ownership operation, only changed as true will be
returned and for idempotency case MDM cluster details will be returned.
- Reinstall all SDC after changing ownership to some newly added MDM.
- To add manager standby MDM, MDM package must be installed with manager
@@ -229,7 +229,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
mdm_name: "mdm_2"
- is_primary: True
+ is_primary: true
state: "present"
- name: Modify performance profile
@@ -262,7 +262,7 @@ EXAMPLES = r'''
port: "{{port}}"
mdm_name: "mdm_1"
virtual_ip_interface:
- - "ens224"
+ - "ens224"
state: "present"
- name: Clear virtual IP interface of the MDM
@@ -273,7 +273,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
mdm_name: "mdm_1"
- clear_interfaces: True
+ clear_interfaces: true
state: "present"
'''
@@ -1052,6 +1052,12 @@ class PowerFlexMdmCluster(object):
if resp is not None:
mdm_cluster_details['perfProfile'] = resp['perfProfile']
+ # Append list of configured MDM IP addresses
+ gateway_configuration_details = self.powerflex_conn.system.\
+ get_gateway_configuration_details()
+ if gateway_configuration_details is not None:
+ mdm_cluster_details['mdmAddresses'] = gateway_configuration_details['mdmAddresses']
+
return mdm_cluster_details
except Exception as e:
@@ -1063,30 +1069,32 @@ class PowerFlexMdmCluster(object):
def check_ip_in_secondarys(self, standby_ip, cluster_details):
"""whether standby IPs present in secondary MDMs"""
- for secondary_mdm in cluster_details['slaves']:
- current_secondary_ips = secondary_mdm['ips']
- for ips in standby_ip:
- if ips in current_secondary_ips:
- LOG.info(self.exist_msg)
- return False
+ if 'slaves' in cluster_details:
+ for secondary_mdm in cluster_details['slaves']:
+ current_secondary_ips = secondary_mdm['ips']
+ for ips in standby_ip:
+ if ips in current_secondary_ips:
+ LOG.info(self.exist_msg)
+ return False
return True
def check_ip_in_tbs(self, standby_ip, cluster_details):
"""whether standby IPs present in tie-breaker MDMs"""
- for tb_mdm in cluster_details['tieBreakers']:
- current_tb_ips = tb_mdm['ips']
- for ips in standby_ip:
- if ips in current_tb_ips:
- LOG.info(self.exist_msg)
- return False
+ if 'tieBreakers' in cluster_details:
+ for tb_mdm in cluster_details['tieBreakers']:
+ current_tb_ips = tb_mdm['ips']
+ for ips in standby_ip:
+ if ips in current_tb_ips:
+ LOG.info(self.exist_msg)
+ return False
return True
def check_ip_in_standby(self, standby_ip, cluster_details):
"""whether standby IPs present in standby MDMs"""
if 'standbyMDMs' in cluster_details:
- for stb_mdm in cluster_details['tieBreakers']:
+ for stb_mdm in cluster_details['standbyMDMs']:
current_stb_ips = stb_mdm['ips']
for ips in standby_ip:
if ips in current_stb_ips:
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py b/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py
index 5ffdc6b63..18cb952f0 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py
@@ -537,6 +537,14 @@ class PowerFlexProtectionDomain(object):
err_msg = msg.format(n_item)
self.module.fail_json(msg=err_msg)
+ if self.module.params['network_limits'] is not None:
+ if self.module.params['network_limits']['overall_limit'] is not None and \
+ self.module.params['network_limits']['overall_limit'] < 0:
+ error_msg = "Overall limit cannot be negative. " \
+ "Provide a valid value "
+ LOG.info(error_msg)
+ self.module.fail_json(msg=error_msg)
+
def is_id_or_new_name_in_create(self):
"""Checking if protection domain id or new names present in create """
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py b/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py
index 94ec651c3..b106dfbdc 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py
@@ -16,7 +16,8 @@ short_description: Manage replication consistency groups on Dell PowerFlex
description:
- Managing replication consistency groups on PowerFlex storage system includes
getting details, creating, modifying, creating snapshots, pause, resume, freeze, unfreeze,
- activate, inactivate and deleting a replication consistency group.
+ activate, failover, reverse, restore, sync, switchover,
+ inactivate and deleting a replication consistency group.
author:
- Trisha Datta (@Trisha-Datta) <ansible.team@dell.com>
- Jennifer John (@Jennifer-John) <ansible.team@dell.com>
@@ -61,15 +62,35 @@ options:
pause:
description:
- Pause or resume the RCG.
+ - This parameter is deprecated. Use rcg_state instead.
+ type: bool
+ rcg_state:
+ description:
+ - Specify an action for RCG.
+ - Failover the RCG.
+ - Reverse the RCG.
+ - Restore the RCG.
+ - Switchover the RCG.
+ - Pause or resume the RCG.
+ - Freeze or unfreeze the RCG.
+ - Synchronize the RCG.
+ choices: ['failover', 'reverse', 'restore',
+ 'switchover', 'sync', 'pause',
+ 'resume', 'freeze', 'unfreeze']
+ type: str
+ force:
+ description:
+ - Force switchover the RCG.
type: bool
freeze:
description:
- Freeze or unfreeze the RCG.
+ - This parameter is deprecated. Use rcg_state instead.
type: bool
pause_mode:
description:
- Pause mode.
- - It is required if pause is set as True.
+ - It is required if pause is set as true.
choices: ['StopDataTransfer', 'OnlyTrackChanges']
type: str
target_volume_access_mode:
@@ -150,7 +171,7 @@ notes:
- Idempotency is not supported for create snapshot operation.
- There is a delay in reflection of final state of RCG after few update operations on RCG.
- In 3.6 and above, the replication consistency group will return back to consistent mode on changing to inconsistent mode
- if consistence barrier arrives. Hence idempotency on setting to inconsistent mode will return changed as True.
+ if consistence barrier arrives. Hence idempotency on setting to inconsistent mode will return changed as true.
'''
EXAMPLES = r'''
@@ -172,7 +193,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_id: "{{rcg_id}}"
- create_snapshot: True
+ create_snapshot: true
state: "present"
- name: Create a replication consistency group
@@ -205,7 +226,7 @@ EXAMPLES = r'''
rpo: 60
target_volume_access_mode: "ReadOnly"
activity_mode: "Inactive"
- is_consistent: True
+ is_consistent: true
- name: Rename replication consistency group
dellemc.powerflex.replication_consistency_group:
@@ -225,7 +246,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- pause: True
+ rcg_state: "pause"
pause_mode: "StopDataTransfer"
- name: Resume replication consistency group
@@ -236,7 +257,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- pause: False
+ rcg_state: "resume"
- name: Freeze replication consistency group
dellemc.powerflex.replication_consistency_group:
@@ -246,7 +267,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- freeze: True
+ rcg_state: "freeze"
- name: UnFreeze replication consistency group
dellemc.powerflex.replication_consistency_group:
@@ -256,7 +277,57 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- freeze: False
+ rcg_state: "unfreeze"
+
+- name: Failover replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "failover"
+
+- name: Reverse replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "reverse"
+
+- name: Restore replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "restore"
+
+- name: Switchover replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "switchover"
+
+- name: Synchronize replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "sync"
- name: Delete replication consistency group
dellemc.powerflex.replication_consistency_group:
@@ -442,8 +513,8 @@ class PowerFlexReplicationConsistencyGroup(object):
def get_rcg(self, rcg_name=None, rcg_id=None):
"""Get rcg details
- :param rcg_name: Name of the rcg
- :param rcg_id: ID of the rcg
+ :param rcg_name: Name of the RCG
+ :param rcg_id: ID of the RCG
:return: RCG details
"""
name_or_id = rcg_id if rcg_id else rcg_name
@@ -585,22 +656,22 @@ class PowerFlexReplicationConsistencyGroup(object):
:param rcg_details: RCG details.
:param pause: Pause or resume RCG.
:param pause_mode: Specifies the pause mode if pause is True.
- :return: Boolean indicates if rcg action is successful
+ :return: Boolean indicates if RCG action is successful
"""
if pause and rcg_details['pauseMode'] == 'None':
if not pause_mode:
self.module.fail_json(msg="Specify pause_mode to perform pause on replication consistency group.")
return self.pause(rcg_id, pause_mode)
- if not pause and rcg_details['pauseMode'] != 'None':
+ if not pause and (rcg_details['pauseMode'] != 'None' or rcg_details['failoverType'] in ['Failover', 'Switchover']):
return self.resume(rcg_id)
def freeze_or_unfreeze_rcg(self, rcg_id, rcg_details, freeze):
- """Perform specified rcg action
+ """Perform specified RCG action
:param rcg_id: Unique identifier of the RCG.
:param rcg_details: RCG details.
:param freeze: Freeze or unfreeze RCG.
- :return: Boolean indicates if rcg action is successful
+ :return: Boolean indicates if RCG action is successful
"""
if freeze and rcg_details['freezeState'].lower() == 'unfrozen':
return self.freeze(rcg_id)
@@ -648,6 +719,98 @@ class PowerFlexReplicationConsistencyGroup(object):
LOG.error(errormsg)
self.module.fail_json(msg=errormsg)
+ def failover(self, rcg_id):
+ """Perform failover
+ :param rcg_id: Unique identifier of the RCG.
+ :return: Boolean indicates if RCG failover is successful
+ """
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.replication_consistency_group.failover(rcg_id)
+ return True
+ except Exception as e:
+ errormsg = f"Failover replication consistency group {rcg_id} failed with error {e}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def reverse(self, rcg_id):
+ """Perform reverse
+ :param rcg_id: Unique identifier of the RCG.
+ :return: Boolean indicates if RCG reverse is successful
+ """
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.replication_consistency_group.reverse(rcg_id)
+ return True
+ except Exception as e:
+ errormsg = f"Reverse replication consistency group {rcg_id} failed with error {e}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def restore(self, rcg_id):
+ """Perform restore
+ :param rcg_id: Unique identifier of the RCG.
+ :return: Boolean indicates if RCG restore is successful
+ """
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.replication_consistency_group.restore(rcg_id)
+ return True
+ except Exception as e:
+ errormsg = f"Restore replication consistency group {rcg_id} failed with error {e}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def switchover(self, rcg_id, force):
+ """Perform switchover
+ :param rcg_id: Unique identifier of the RCG.
+ :param force: Force switchover.
+ :return: Boolean indicates if RCG switchover is successful
+ """
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.replication_consistency_group.switchover(rcg_id, force)
+ return True
+ except Exception as e:
+ errormsg = f"Switchover replication consistency group {rcg_id} failed with error {e}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def perform_rcg_action(self, rcg_id, rcg_details):
+ """Perform failover, reverse, restore or switchover
+ :param rcg_id: Unique identifier of the RCG.
+ :param rcg_details: RCG details.
+ :return: Boolean indicates if RCG action is successful
+ """
+ rcg_state = self.module.params['rcg_state']
+ force = self.module.params['force']
+
+ if rcg_state == 'failover' and rcg_details['failoverType'] != 'Failover':
+ return self.failover(rcg_id)
+
+ if rcg_state == 'switchover' and rcg_details['failoverType'] != 'Switchover':
+ return self.switchover(rcg_id, force)
+
+ if rcg_state == 'reverse' and rcg_details['failoverType']:
+ return self.reverse(rcg_id)
+
+ if rcg_state == 'restore' and rcg_details['failoverType'] != 'None':
+ return self.restore(rcg_id)
+
+ def sync(self, rcg_id):
+ """Perform sync
+ :param rcg_id: Unique identifier of the RCG.
+ :return: Boolean indicates if RCG sync is successful
+ """
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.replication_consistency_group.sync(rcg_id)
+ return True
+ except Exception as e:
+ errormsg = f"Synchronization of replication consistency group {rcg_id} failed with error {e}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
def set_consistency(self, rcg_id, rcg_details, is_consistent):
"""Set rcg to specified mode
:param rcg_id: Unique identifier of the RCG.
@@ -689,7 +852,7 @@ class PowerFlexReplicationConsistencyGroup(object):
def delete_rcg(self, rcg_id):
"""Delete RCG
:param rcg_id: Unique identifier of the RCG.
- :return: Boolean indicates if delete rcg operation is successful
+ :return: Boolean indicates if delete RCG operation is successful
"""
try:
if not self.module.check_mode:
@@ -753,17 +916,55 @@ class PowerFlexReplicationConsistencyGroup(object):
rcg_params['remote_peer']['protection_domain_name'] is not None):
self.module.fail_json(msg='Enter remote protection_domain_name or protection_domain_id to create replication consistency group')
+ def get_pause_and_freeze_value(self):
+ """
+ Get Pause and Freeze values
+ :return: Boolean for pause and freeze
+ :rtype: (bool,bool)
+ """
+ rcg_state = self.module.params['rcg_state']
+ pause = self.module.params['pause']
+ freeze = self.module.params['freeze']
+
+ if pause is not None:
+ self.module.deprecate(
+ msg="Use 'rcg_state' param instead of 'pause'",
+ version="3.0.0",
+ collection_name="dellemc.powerflex"
+ )
+
+ if freeze is not None:
+ self.module.deprecate(
+ msg="Use 'rcg_state' param instead of 'freeze'",
+ version="3.0.0",
+ collection_name="dellemc.powerflex"
+ )
+
+ if rcg_state == 'pause':
+ pause = True
+ if rcg_state == 'resume':
+ pause = False
+ if rcg_state == 'freeze':
+ freeze = True
+ if rcg_state == 'unfreeze':
+ freeze = False
+
+ if self.module.params['pause_mode'] and not pause:
+ self.module.fail_json(msg="Specify rcg_state as 'pause' to pause replication consistency group")
+
+ return pause, freeze
+
def modify_rcg(self, rcg_id, rcg_details):
+ rcg_state = self.module.params['rcg_state']
create_snapshot = self.module.params['create_snapshot']
rpo = self.module.params['rpo']
target_volume_access_mode = self.module.params['target_volume_access_mode']
- pause = self.module.params['pause']
- freeze = self.module.params['freeze']
is_consistent = self.module.params['is_consistent']
activity_mode = self.module.params['activity_mode']
new_rcg_name = self.module.params['new_rcg_name']
changed = False
+ pause, freeze = self.get_pause_and_freeze_value()
if create_snapshot is True:
changed = self.create_rcg_snapshot(rcg_id)
if rpo and rcg_details['rpoInSeconds'] and \
@@ -788,6 +989,11 @@ class PowerFlexReplicationConsistencyGroup(object):
changed = True
if new_rcg_name and self.rename_rcg(rcg_id, rcg_details, new_rcg_name):
changed = True
+ if rcg_state == 'sync' and self.sync(rcg_id):
+ changed = True
+
+ rcg_action_status = self.perform_rcg_action(rcg_id, rcg_details)
+ changed = changed or rcg_action_status
return changed
@@ -800,8 +1006,6 @@ class PowerFlexReplicationConsistencyGroup(object):
for param in params:
if rcg_params[param] and utils.is_invalid_name(rcg_params[param]):
self.module.fail_json(msg='Enter a valid %s' % param)
- if rcg_params['pause_mode'] and rcg_params['pause'] is None:
- self.module.fail_json(msg='Specify pause as True to pause replication consistency group')
except Exception as e:
error_msg = "Validating input parameters failed with " \
"error '%s'" % (str(e))
@@ -879,7 +1083,13 @@ def get_powerflex_replication_consistency_group_parameters():
rpo=dict(type='int'), protection_domain_id=dict(),
protection_domain_name=dict(), new_rcg_name=dict(),
activity_mode=dict(choices=['Active', 'Inactive']),
- pause=dict(type='bool'), freeze=dict(type='bool'),
+ pause=dict(type='bool', removed_in_version='3.0.0', removed_from_collection='dellemc.powerflex'),
+ freeze=dict(type='bool', removed_in_version='3.0.0', removed_from_collection='dellemc.powerflex'),
+ force=dict(type='bool'),
+ rcg_state=dict(choices=['failover', 'reverse',
+ 'restore', 'switchover',
+ 'sync', 'pause', 'resume',
+ 'freeze', 'unfreeze']),
pause_mode=dict(choices=['StopDataTransfer', 'OnlyTrackChanges']),
target_volume_access_mode=dict(choices=['ReadOnly', 'NoAccess']),
is_consistent=dict(type='bool'),
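A minimal sketch of the new force option together with rcg_state; in perform_rcg_action above, force is only consumed by the switchover path. The RCG name is illustrative.

- name: Force switchover of a replication consistency group
  dellemc.powerflex.replication_consistency_group:
    hostname: "{{ hostname }}"
    username: "{{ username }}"
    password: "{{ password }}"
    validate_certs: "{{ validate_certs }}"
    port: "{{ port }}"
    rcg_name: "rcg_test"
    rcg_state: "switchover"
    force: true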
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/replication_pair.py b/ansible_collections/dellemc/powerflex/plugins/modules/replication_pair.py
index c95455023..1bd69f225 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/replication_pair.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/replication_pair.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
-# Copyright: (c) 2023, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
""" Ansible module for managing replication pairs on Dell Technologies (Dell) PowerFlex"""
@@ -77,7 +77,7 @@ options:
- Copy type.
choices: ['Identical', 'OnlineCopy', 'OnlineHashCopy', 'OfflineCopy']
type: str
- required: True
+ required: true
name:
description:
- Name of replication pair.
@@ -138,7 +138,6 @@ notes:
'''
EXAMPLES = r'''
-
- name: Get replication pair details
dellemc.powerflex.replication_pair:
hostname: "{{hostname}}"
@@ -176,11 +175,11 @@ EXAMPLES = r'''
copy_type: "OnlineCopy"
name: "pair1"
remote_peer:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- validate_certs: "{{validate_certs}}"
- port: "{{port}}"
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
- name: Pause replication pair
dellemc.powerflex.replication_pair:
@@ -190,7 +189,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
pair_name: "pair1"
- pause: True
+ pause: true
- name: Resume replication pair
dellemc.powerflex.replication_pair:
@@ -200,7 +199,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
pair_name: "pair1"
- pause: False
+ pause: false
- name: Delete replication pair
dellemc.powerflex.replication_pair:
@@ -596,7 +595,7 @@ class PowerFlexReplicationPair(object):
def validate_pause(self, params):
if params['pause'] is not None and (not params['pair_id'] and not params['pair_name']):
- self.module.fail_json(msg='Specify either pair_id or pair_name to perform pause or resume of inital copy')
+ self.module.fail_json(msg='Specify either pair_id or pair_name to perform pause or resume of initial copy')
def validate_pause_or_resume(self, pause, replication_pair_details, pair_id):
if not replication_pair_details:
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py b/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py
index a2f05a31b..bb13a19a2 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py
@@ -46,6 +46,12 @@ options:
description:
- New name of the SDC. Used to rename the SDC.
type: str
+ performance_profile:
+ description:
+ - Define the performance profile as I(Compact) or I(HighPerformance).
+ - The high performance profile configures a predefined set of parameters for very high performance use cases.
+ choices: ['Compact', 'HighPerformance']
+ type: str
state:
description:
- State of the SDC.
@@ -75,6 +81,25 @@ EXAMPLES = r'''
sdc_name: "centos_sdc"
sdc_new_name: "centos_sdc_renamed"
state: "present"
+
+- name: Modify performance profile of SDC using SDC name
+ dellemc.powerflex.sdc:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ sdc_name: "centos_sdc"
+ performance_profile: "Compact"
+ state: "present"
+
+- name: Remove SDC using SDC name
+ dellemc.powerflex.sdc:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ sdc_name: "centos_sdc"
+ state: "absent"
'''
RETURN = r'''
@@ -274,16 +299,54 @@ class PowerFlexSdc(object):
def validate_parameters(self, sdc_name=None, sdc_id=None, sdc_ip=None):
"""Validate the input parameters"""
- if all(param is None for param in [sdc_name, sdc_id, sdc_ip]):
- self.module.fail_json(msg="Please provide sdc_name/sdc_id/sdc_ip "
- "with valid input.")
-
sdc_identifiers = ['sdc_name', 'sdc_id', 'sdc_ip']
for param in sdc_identifiers:
if self.module.params[param] is not None and \
len(self.module.params[param].strip()) == 0:
- error_msg = "Please provide valid %s" % param
- self.module.fail_json(msg=error_msg)
+ msg = f"Please provide valid {param}"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def remove(self, sdc_id):
+ """Remove the SDC"""
+ try:
+ LOG.info(msg=f"Removing SDC {sdc_id}")
+ self.powerflex_conn.sdc.delete(sdc_id)
+ return True
+ except Exception as e:
+ errormsg = f"Removing SDC {sdc_id} failed with error {str(e)}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def set_performance_profile(self, sdc_id, performance_profile):
+ """Set performance profile of SDC"""
+ try:
+ LOG.info(msg=f"Setting performance profile of SDC {sdc_id}")
+ self.powerflex_conn.sdc.set_performance_profile(sdc_id, performance_profile)
+ return True
+ except Exception as e:
+ errormsg = f"Modifying performance profile of SDC {sdc_id} failed with error {str(e)}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_input(self, sdc_details, sdc_new_name, state, id_ip_name):
+ if state == 'present' and not sdc_details:
+ error_msg = 'Could not find any SDC instance with ' \
+ 'identifier %s.' % id_ip_name
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ if sdc_new_name and len(sdc_new_name.strip()) == 0:
+ self.module.fail_json(msg="Provide valid SDC name to rename to.")
+
+ def perform_modify(self, sdc_details, sdc_new_name, performance_profile):
+ changed = False
+ if sdc_new_name is not None and sdc_new_name != sdc_details['name']:
+ changed = self.rename_sdc(sdc_details['id'], sdc_new_name)
+
+ if performance_profile and performance_profile != sdc_details['perfProfile']:
+ changed = self.set_performance_profile(sdc_details['id'], performance_profile)
+ return changed
def perform_module_operation(self):
"""
@@ -294,6 +357,7 @@ class PowerFlexSdc(object):
sdc_id = self.module.params['sdc_id']
sdc_ip = self.module.params['sdc_ip']
sdc_new_name = self.module.params['sdc_new_name']
+ performance_profile = self.module.params['performance_profile']
state = self.module.params['state']
# result is a dictionary to contain end state and SDC details
@@ -304,40 +368,22 @@ class PowerFlexSdc(object):
)
self.validate_parameters(sdc_name, sdc_id, sdc_ip)
-
sdc_details = self.get_sdc(sdc_name=sdc_name, sdc_id=sdc_id,
sdc_ip=sdc_ip)
- if sdc_name:
- id_ip_name = sdc_name
- elif sdc_ip:
- id_ip_name = sdc_ip
- else:
- id_ip_name = sdc_id
+ id_ip_name = sdc_name or sdc_ip or sdc_id
- if state == 'present' and not sdc_details:
- error_msg = 'Could not find any SDC instance with ' \
- 'identifier %s.' % id_ip_name
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
+ self.validate_input(sdc_details, sdc_new_name, state, id_ip_name)
if state == 'absent' and sdc_details:
- error_msg = 'Removal of SDC is not allowed through Ansible ' \
- 'module.'
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
-
- if state == 'present' and sdc_details and sdc_new_name is not None:
- if len(sdc_new_name.strip()) == 0:
- self.module.fail_json(msg="Please provide valid SDC name.")
-
- changed = self.rename_sdc(sdc_details['id'], sdc_new_name)
+ changed = self.remove(sdc_details['id'])
- if changed:
- sdc_name = sdc_new_name
+ if state == 'present' and sdc_details:
+ changed = self.perform_modify(sdc_details, sdc_new_name, performance_profile)
- if state == 'present':
- result['sdc_details'] = self.get_sdc(sdc_name=sdc_name,
- sdc_id=sdc_id, sdc_ip=sdc_ip)
+ if changed:
+ sdc_details = self.get_sdc(sdc_name=sdc_new_name or sdc_name,
+ sdc_id=sdc_id, sdc_ip=sdc_ip)
+ result['sdc_details'] = sdc_details
result['changed'] = changed
self.module.exit_json(**result)
@@ -349,7 +395,7 @@ def get_powerflex_sdc_parameters():
sdc_id=dict(),
sdc_ip=dict(),
sdc_name=dict(),
- sdc_new_name=dict(),
+ sdc_new_name=dict(), performance_profile=dict(choices=['Compact', 'HighPerformance']),
state=dict(required=True, type='str', choices=['present', 'absent'])
)
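An illustrative task for the reworked perform_modify path, which applies a rename and a performance profile change in the same run when both differ from the current SDC details; the SDC names are hypothetical.

- name: Rename an SDC and set its performance profile in one task
  dellemc.powerflex.sdc:
    hostname: "{{ hostname }}"
    username: "{{ username }}"
    password: "{{ password }}"
    validate_certs: "{{ validate_certs }}"
    sdc_name: "centos_sdc"
    sdc_new_name: "centos_sdc_hp"
    performance_profile: "HighPerformance"
    state: "present"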
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/sds.py b/ansible_collections/dellemc/powerflex/plugins/modules/sds.py
index 91c287769..b0d3045ec 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/sds.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/sds.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
-# Copyright: (c) 2021, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
""" Ansible module for managing SDS on Dell Technologies (Dell) PowerFlex"""
@@ -19,6 +19,7 @@ description:
modifying attributes of SDS, and deleting SDS.
author:
- Rajshree Khare (@khareRajshree) <ansible.team@dell.com>
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
extends_documentation_fragment:
- dellemc.powerflex.powerflex
options:
@@ -96,6 +97,16 @@ options:
- Default value by API is C(HighPerformance).
choices: ['Compact', 'HighPerformance']
type: str
+ fault_set_name:
+ description:
+ - Name of the fault set.
+ - Mutually exclusive with I(fault_set_id).
+ type: str
+ fault_set_id:
+ description:
+ - Unique identifier of the fault set.
+ - Mutually exclusive with I(fault_set_name).
+ type: str
state:
description:
- State of the SDS.
@@ -114,7 +125,7 @@ notes:
'sdsOnly').
- SDS can be created with RF cache disabled, but, be aware that the RF cache
is not always updated. In this case, the user should re-try the operation.
- - The I(check_mode) is not supported.
+ - The I(check_mode) is supported.
'''
EXAMPLES = r'''
@@ -142,6 +153,7 @@ EXAMPLES = r'''
port: "{{port}}"
sds_name: "node1"
protection_domain_name: "domain1"
+ fault_set_name: "faultset1"
sds_ip_list:
- ip: "198.10.xxx.xxx"
role: "sdcOnly"
@@ -479,12 +491,16 @@ sds_details:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\
import utils
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.powerflex_base \
+ import PowerFlexBase
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.configuration \
+ import Configuration
import copy
LOG = utils.get_logger('sds')
-class PowerFlexSDS(object):
+class PowerFlexSDS(PowerFlexBase):
"""Class with SDS operations"""
def __init__(self):
@@ -493,29 +509,27 @@ class PowerFlexSDS(object):
self.module_params.update(get_powerflex_sds_parameters())
mut_ex_args = [['sds_name', 'sds_id'],
- ['protection_domain_name', 'protection_domain_id']]
+ ['protection_domain_name', 'protection_domain_id'],
+ ['fault_set_name', 'fault_set_id']]
required_together_args = [['sds_ip_list', 'sds_ip_state']]
required_one_of_args = [['sds_name', 'sds_id']]
# initialize the Ansible module
- self.module = AnsibleModule(
- argument_spec=self.module_params,
- supports_check_mode=False,
- mutually_exclusive=mut_ex_args,
- required_together=required_together_args,
- required_one_of=required_one_of_args)
-
- utils.ensure_required_libs(self.module)
-
- try:
- self.powerflex_conn = utils.get_powerflex_gateway_host_connection(
- self.module.params)
- LOG.info("Got the PowerFlex system connection object instance")
- except Exception as e:
- LOG.error(str(e))
- self.module.fail_json(msg=str(e))
+ ansible_module_params = {
+ 'argument_spec': get_powerflex_sds_parameters(),
+ 'supports_check_mode': True,
+ 'mutually_exclusive': mut_ex_args,
+ 'required_one_of': required_one_of_args,
+ 'required_together': required_together_args
+ }
+ super().__init__(AnsibleModule, ansible_module_params)
+
+ self.result = dict(
+ changed=False,
+ sds_details={}
+ )
def validate_rmcache_size_parameter(self, rmcache_enabled, rmcache_size):
"""Validate the input parameters"""
@@ -571,40 +585,24 @@ class PowerFlexSDS(object):
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
- def get_protection_domain(self, protection_domain_name=None,
- protection_domain_id=None):
- """Get protection domain details
- :param protection_domain_name: Name of the protection domain
+ def get_protection_domain(
+ self, protection_domain_name=None, protection_domain_id=None
+ ):
+ """Get the details of a protection domain in a given PowerFlex storage
+ system"""
+ return Configuration(self.powerflex_conn, self.module).get_protection_domain(
+ protection_domain_name=protection_domain_name, protection_domain_id=protection_domain_id)
+
+ def get_fault_set(self, fault_set_name=None, fault_set_id=None, protection_domain_id=None):
+ """Get fault set details
+ :param fault_set_name: Name of the fault set
+ :param fault_set_id: Id of the fault set
:param protection_domain_id: ID of the protection domain
- :return: Protection domain details
+ :return: Fault set details
:rtype: dict
"""
- name_or_id = protection_domain_id if protection_domain_id \
- else protection_domain_name
- try:
- pd_details = None
- if protection_domain_id:
- pd_details = self.powerflex_conn.protection_domain.get(
- filter_fields={'id': protection_domain_id})
-
- if protection_domain_name:
- pd_details = self.powerflex_conn.protection_domain.get(
- filter_fields={'name': protection_domain_name})
-
- if not pd_details:
- error_msg = "Unable to find the protection domain with " \
- "'%s'. Please enter a valid protection domain " \
- "name/id." % name_or_id
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
-
- return pd_details[0]
-
- except Exception as e:
- error_msg = "Failed to get the protection domain '%s' with " \
- "error '%s'" % (name_or_id, str(e))
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
+ return Configuration(self.powerflex_conn, self.module).get_fault_set(
+ fault_set_name=fault_set_name, fault_set_id=fault_set_id, protection_domain_id=protection_domain_id)
def restructure_ip_role_dict(self, sds_ip_list):
"""Restructure IP role dict
@@ -619,8 +617,41 @@ class PowerFlexSDS(object):
new_sds_ip_list.append({"SdsIp": item})
return new_sds_ip_list
- def create_sds(self, protection_domain_id, sds_ip_list, sds_ip_state,
- sds_name, rmcache_enabled=None, rmcache_size=None):
+ def validate_create(self, protection_domain_id, sds_ip_list, sds_ip_state, sds_name,
+ sds_id, sds_new_name, rmcache_enabled=None, rmcache_size=None,
+ fault_set_id=None):
+
+ if sds_name is None or len(sds_name.strip()) == 0:
+ error_msg = "Please provide valid sds_name value for " \
+ "creation of SDS."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ if protection_domain_id is None:
+ error_msg = "Protection Domain is a mandatory parameter " \
+ "for creating an SDS. Please enter a valid value."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ if sds_ip_list is None or len(sds_ip_list) == 0:
+ error_msg = "Please provide valid sds_ip_list values for " \
+ "creation of SDS."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ if sds_ip_state is not None and sds_ip_state != "present-in-sds":
+ error_msg = "Incorrect IP state given for creation of SDS."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ if sds_id:
+ error_msg = "Creation of SDS is allowed using sds_name " \
+ "only, sds_id given."
+ LOG.info(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def create_sds(self, protection_domain_id, sds_ip_list, sds_ip_state, sds_name,
+ sds_id, sds_new_name, rmcache_enabled=None, rmcache_size=None, fault_set_id=None):
"""Create SDS
:param protection_domain_id: ID of the Protection Domain
:type protection_domain_id: str
@@ -636,62 +667,53 @@ class PowerFlexSDS(object):
:type rmcache_enabled: bool
:param rmcache_size: Read RAM cache size (in MB)
:type rmcache_size: int
+ :param fault_set_id: ID of the Fault Set
+ :type fault_set_id: str
:return: Boolean indicating if create operation is successful
"""
try:
- if sds_name is None or len(sds_name.strip()) == 0:
- error_msg = "Please provide valid sds_name value for " \
- "creation of SDS."
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
- if protection_domain_id is None:
- error_msg = "Protection Domain is a mandatory parameter " \
- "for creating a SDS. Please enter a valid value."
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
-
- if sds_ip_list is None or len(sds_ip_list) == 0:
- error_msg = "Please provide valid sds_ip_list values for " \
- "creation of SDS."
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
+ # Restructure IP-role parameter format
+ self.validate_create(protection_domain_id=protection_domain_id,
+ sds_ip_list=sds_ip_list, sds_ip_state=sds_ip_state,
+ sds_name=sds_name, sds_id=sds_id, sds_new_name=sds_new_name,
+ rmcache_enabled=rmcache_enabled, rmcache_size=rmcache_size,
+ fault_set_id=fault_set_id)
- if sds_ip_state is not None and sds_ip_state != "present-in-sds":
- error_msg = "Incorrect IP state given for creation of SDS."
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
+ self.validate_ip_parameter(sds_ip_list)
- # Restructure IP-role parameter format
- if sds_ip_list and sds_ip_state == "present-in-sds":
- sds_ip_list = self.restructure_ip_role_dict(sds_ip_list)
-
- if rmcache_size is not None:
- self.validate_rmcache_size_parameter(rmcache_enabled,
- rmcache_size)
- # set rmcache size in KB
- rmcache_size = rmcache_size * 1024
-
- create_params = ("protection_domain_id: %s,"
- " sds_ip_list: %s,"
- " sds_name: %s,"
- " rmcache_enabled: %s, "
- " rmcache_size_KB: %s"
- % (protection_domain_id, sds_ip_list,
- sds_name, rmcache_enabled, rmcache_size))
- LOG.info("Creating SDS with params: %s", create_params)
-
- self.powerflex_conn.sds.create(
- protection_domain_id=protection_domain_id,
- sds_ips=sds_ip_list,
- name=sds_name,
- rmcache_enabled=rmcache_enabled,
- rmcache_size_in_kb=rmcache_size)
- return True
+ if not self.module.check_mode:
+ if sds_ip_list and sds_ip_state == "present-in-sds":
+ sds_ip_list = self.restructure_ip_role_dict(sds_ip_list)
+
+ if rmcache_size is not None:
+ self.validate_rmcache_size_parameter(rmcache_enabled=rmcache_enabled,
+ rmcache_size=rmcache_size)
+ # set rmcache size in KB
+ rmcache_size = rmcache_size * 1024
+
+ create_params = ("protection_domain_id: %s,"
+ " sds_ip_list: %s,"
+ " sds_name: %s,"
+ " rmcache_enabled: %s, "
+ " rmcache_size_KB: %s, "
+ " fault_set_id: %s"
+ % (protection_domain_id, sds_ip_list,
+ sds_name, rmcache_enabled, rmcache_size,
+ fault_set_id))
+ LOG.info("Creating SDS with params: %s", create_params)
+
+ self.powerflex_conn.sds.create(
+ protection_domain_id=protection_domain_id,
+ sds_ips=sds_ip_list,
+ name=sds_name,
+ rmcache_enabled=rmcache_enabled,
+ rmcache_size_in_kb=rmcache_size,
+ fault_set_id=fault_set_id)
+ return self.get_sds_details(sds_name=sds_name)
except Exception as e:
- error_msg = "Create SDS '%s' operation failed with error '%s'" \
- % (sds_name, str(e))
+ error_msg = f"Create SDS {sds_name} operation failed with error {str(e)}"
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
@@ -716,21 +738,20 @@ class PowerFlexSDS(object):
"""
modify_dict = {}
- if sds_new_name is not None:
- if len(sds_new_name.strip()) == 0:
- error_msg = "Please provide valid SDS name."
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
- if sds_new_name != sds_details['name']:
- modify_dict['name'] = sds_new_name
+ if sds_new_name is not None and \
+ sds_new_name != sds_details['name']:
+ modify_dict['name'] = sds_new_name
- if rfcache_enabled is not None and \
- sds_details['rfcacheEnabled'] != rfcache_enabled:
- modify_dict['rfcacheEnabled'] = rfcache_enabled
+ param_input = dict()
+ param_input['rfcacheEnabled'] = rfcache_enabled
+ param_input['rmcacheEnabled'] = rmcache_enabled
+ param_input['perfProfile'] = performance_profile
- if rmcache_enabled is not None and \
- sds_details['rmcacheEnabled'] != rmcache_enabled:
- modify_dict['rmcacheEnabled'] = rmcache_enabled
+ param_list = ['rfcacheEnabled', 'rmcacheEnabled', 'perfProfile']
+ for param in param_list:
+ if param_input[param] is not None and \
+ sds_details[param] != param_input[param]:
+ modify_dict[param] = param_input[param]
if rmcache_size is not None:
self.validate_rmcache_size_parameter(rmcache_enabled,
@@ -748,10 +769,6 @@ class PowerFlexSDS(object):
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
- if performance_profile is not None and \
- sds_details['perfProfile'] != performance_profile:
- modify_dict['perfProfile'] = performance_profile
-
return modify_dict
def modify_sds_attributes(self, sds_id, modify_dict,
@@ -772,41 +789,42 @@ class PowerFlexSDS(object):
" updated is '%s'." % (str(modify_dict))
LOG.info(msg)
- if 'name' in modify_dict:
- self.powerflex_conn.sds.rename(sds_id, modify_dict['name'])
- msg = "The name of the SDS is updated to '%s' successfully." \
- % modify_dict['name']
- LOG.info(msg)
+ if not self.module.check_mode:
+ if 'name' in modify_dict:
+ self.powerflex_conn.sds.rename(sds_id, modify_dict['name'])
+ msg = "The name of the SDS is updated to '%s' successfully." \
+ % modify_dict['name']
+ LOG.info(msg)
- if 'rfcacheEnabled' in modify_dict:
- self.powerflex_conn.sds.set_rfcache_enabled(
- sds_id, modify_dict['rfcacheEnabled'])
- msg = "The use RFcache is updated to '%s' successfully." \
- % modify_dict['rfcacheEnabled']
- LOG.info(msg)
+ if 'rfcacheEnabled' in modify_dict:
+ self.powerflex_conn.sds.set_rfcache_enabled(
+ sds_id, modify_dict['rfcacheEnabled'])
+ msg = "The use RFcache is updated to '%s' successfully." \
+ % modify_dict['rfcacheEnabled']
+ LOG.info(msg)
- if 'rmcacheEnabled' in modify_dict:
- self.powerflex_conn.sds.set_rmcache_enabled(
- sds_id, modify_dict['rmcacheEnabled'])
- msg = "The use RMcache is updated to '%s' successfully." \
- % modify_dict['rmcacheEnabled']
- LOG.info(msg)
+ if 'rmcacheEnabled' in modify_dict:
+ self.powerflex_conn.sds.set_rmcache_enabled(
+ sds_id, modify_dict['rmcacheEnabled'])
+ msg = "The use RMcache is updated to '%s' successfully." \
+ % modify_dict['rmcacheEnabled']
+ LOG.info(msg)
- if 'rmcacheSizeInMB' in modify_dict:
- self.powerflex_conn.sds.set_rmcache_size(
- sds_id, modify_dict['rmcacheSizeInMB'])
- msg = "The size of RMcache is updated to '%s' successfully." \
- % modify_dict['rmcacheSizeInMB']
- LOG.info(msg)
+ if 'rmcacheSizeInMB' in modify_dict:
+ self.powerflex_conn.sds.set_rmcache_size(
+ sds_id, modify_dict['rmcacheSizeInMB'])
+ msg = "The size of RMcache is updated to '%s' successfully." \
+ % modify_dict['rmcacheSizeInMB']
+ LOG.info(msg)
- if 'perfProfile' in modify_dict:
- self.powerflex_conn.sds.set_performance_parameters(
- sds_id, modify_dict['perfProfile'])
- msg = "The performance profile is updated to '%s'" \
- % modify_dict['perfProfile']
- LOG.info(msg)
+ if 'perfProfile' in modify_dict:
+ self.powerflex_conn.sds.set_performance_parameters(
+ sds_id, modify_dict['perfProfile'])
+ msg = "The performance profile is updated to '%s'" \
+ % modify_dict['perfProfile']
+ LOG.info(msg)
- return True
+ return self.get_sds_details(sds_id=sds_id)
except Exception as e:
if create_flag:
error_msg = "Create SDS is successful, but failed to update" \
@@ -818,50 +836,39 @@ class PowerFlexSDS(object):
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
- def identify_ip_role(self, sds_ip_list, sds_details, sds_ip_state):
- """Identify IPs before addition/removal
- :param sds_ip_list: List of one or more IP addresses and
- their roles
- :type sds_ip_list: list[dict]
- :param sds_details: SDS details
- :type sds_details: dict
- :param sds_ip_state: State of IP in SDS
- :type sds_ip_state: str
- :return: List containing the key-value pairs of IP-role for an
- SDS
- :rtype: list[dict]
- """
+ def identify_ip_role_add(self, sds_ip_list, sds_details, sds_ip_state):
+ # identify IPs to add or roles to update
+
existing_ip_role_list = sds_details['ipList']
+ update_role = []
+ ips_to_add = []
+
+ # identify IPs to add
+ existing_ip_list = []
+ if existing_ip_role_list:
+ for ip in existing_ip_role_list:
+ existing_ip_list.append(ip['ip'])
+ for given_ip in sds_ip_list:
+ ip = given_ip['ip']
+ if ip not in existing_ip_list:
+ ips_to_add.append(given_ip)
+ LOG.info("IP(s) to be added: %s", ips_to_add)
+
+ if len(ips_to_add) != 0:
+ for ip in ips_to_add:
+ sds_ip_list.remove(ip)
+
+ # identify IPs whose role needs to be updated
+ update_role = [ip for ip in sds_ip_list
+ if ip not in existing_ip_role_list]
+ LOG.info("Role update needed for: %s", update_role)
+ return ips_to_add, update_role
+
+ def identify_ip_role_remove(self, sds_ip_list, sds_details, sds_ip_state):
+ # identify IPs to remove
- # identify IPs to add or roles to update
- if sds_ip_state == "present-in-sds":
- update_role = []
- ips_to_add = []
-
- # identify IPs to add
- existing_ip_list = []
- if existing_ip_role_list:
- for ip in existing_ip_role_list:
- existing_ip_list.append(ip['ip'])
- for given_ip in sds_ip_list:
- ip = given_ip['ip']
- if ip not in existing_ip_list:
- ips_to_add.append(given_ip)
- LOG.info("IP(s) to be added: %s", ips_to_add)
-
- if len(ips_to_add) != 0:
- for ip in ips_to_add:
- sds_ip_list.remove(ip)
-
- # identify IPs whose role needs to be updated
- update_role = [ip for ip in sds_ip_list
- if ip not in existing_ip_role_list]
- LOG.info("Role update needed for: %s", update_role)
-
- return ips_to_add, update_role
-
- elif sds_ip_state == "absent-in-sds":
- # identify IPs to remove
+ existing_ip_role_list = sds_details['ipList']
+ if sds_ip_state == "absent-in-sds":
ips_to_remove = [ip for ip in existing_ip_role_list
if ip in sds_ip_list]
if len(ips_to_remove) != 0:
@@ -869,7 +876,7 @@ class PowerFlexSDS(object):
return ips_to_remove
else:
LOG.info("IP(s) do not exists.")
- return False, None
+ return []
def add_ip(self, sds_id, sds_ip_list):
"""Add IP to SDS
@@ -881,10 +888,11 @@ class PowerFlexSDS(object):
:return: Boolean indicating if add IP operation is successful
"""
try:
- for ip in sds_ip_list:
- LOG.info("IP to add: %s", ip)
- self.powerflex_conn.sds.add_ip(sds_id=sds_id, sds_ip=ip)
- LOG.info("IP added successfully.")
+ if not self.module.check_mode:
+ for ip in sds_ip_list:
+ LOG.info("IP to add: %s", ip)
+ self.powerflex_conn.sds.add_ip(sds_id=sds_id, sds_ip=ip)
+ LOG.info("IP added successfully.")
return True
except Exception as e:
error_msg = "Add IP to SDS '%s' operation failed with " \
@@ -902,15 +910,16 @@ class PowerFlexSDS(object):
:return: Boolean indicating if add IP operation is successful
"""
try:
- LOG.info("Role updates for: %s", sds_ip_list)
- if len(sds_ip_list) != 0:
- for ip in sds_ip_list:
- LOG.info("ip-role: %s", ip)
- self.powerflex_conn.sds.set_ip_role(sds_id, ip['ip'],
- ip['role'])
- msg = "The role '%s' for IP '%s' is updated " \
- "successfully." % (ip['role'], ip['ip'])
- LOG.info(msg)
+ if not self.module.check_mode:
+ LOG.info("Role updates for: %s", sds_ip_list)
+ if len(sds_ip_list) != 0:
+ for ip in sds_ip_list:
+ LOG.info("ip-role: %s", ip)
+ self.powerflex_conn.sds.set_ip_role(sds_id, ip['ip'],
+ ip['role'])
+ msg = "The role '%s' for IP '%s' is updated " \
+ "successfully." % (ip['role'], ip['ip'])
+ LOG.info(msg)
return True
except Exception as e:
error_msg = "Update role of IP for SDS '%s' operation failed " \
@@ -928,10 +937,11 @@ class PowerFlexSDS(object):
:return: Boolean indicating if remove IP operation is successful
"""
try:
- for ip in sds_ip_list:
- LOG.info("IP to remove: %s", ip)
- self.powerflex_conn.sds.remove_ip(sds_id=sds_id, ip=ip['ip'])
- LOG.info("IP removed successfully.")
+ if not self.module.check_mode:
+ for ip in sds_ip_list:
+ LOG.info("IP to remove: %s", ip)
+ self.powerflex_conn.sds.remove_ip(sds_id=sds_id, ip=ip['ip'])
+ LOG.info("IP removed successfully.")
return True
except Exception as e:
error_msg = "Remove IP from SDS '%s' operation failed with " \
@@ -946,145 +956,16 @@ class PowerFlexSDS(object):
:return: Boolean indicating if delete operation is successful
"""
try:
- self.powerflex_conn.sds.delete(sds_id)
- return True
+ if not self.module.check_mode:
+ self.powerflex_conn.sds.delete(sds_id)
+ return None
+ return self.get_sds_details(sds_id=sds_id)
except Exception as e:
error_msg = "Delete SDS '%s' operation failed with error '%s'" \
% (sds_id, str(e))
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
- def perform_module_operation(self):
- """
- Perform different actions on SDS based on parameters passed in
- the playbook
- """
- sds_name = self.module.params['sds_name']
- sds_id = self.module.params['sds_id']
- sds_new_name = self.module.params['sds_new_name']
- protection_domain_name = self.module.params['protection_domain_name']
- protection_domain_id = self.module.params['protection_domain_id']
- rfcache_enabled = self.module.params['rfcache_enabled']
- rmcache_enabled = self.module.params['rmcache_enabled']
- rmcache_size = self.module.params['rmcache_size']
- sds_ip_list = copy.deepcopy(self.module.params['sds_ip_list'])
- sds_ip_state = self.module.params['sds_ip_state']
- performance_profile = self.module.params['performance_profile']
- state = self.module.params['state']
-
- # result is a dictionary to contain end state and SDS details
- changed = False
- result = dict(
- changed=False,
- sds_details={}
- )
-
- # get SDS details
- sds_details = self.get_sds_details(sds_name, sds_id)
- if sds_details:
- sds_id = sds_details['id']
- msg = "Fetched the SDS details %s" % (str(sds_details))
- LOG.info(msg)
-
- # get Protection Domain ID from name
- if protection_domain_name:
- pd_details = self.get_protection_domain(protection_domain_name)
- if pd_details:
- protection_domain_id = pd_details['id']
- msg = "Fetched the protection domain details with id '%s', " \
- "name '%s'" % (protection_domain_id, protection_domain_name)
- LOG.info(msg)
-
- # create operation
- create_changed = False
- if state == 'present' and not sds_details:
- if sds_id:
- error_msg = "Creation of SDS is allowed using sds_name " \
- "only, sds_id given."
- LOG.info(error_msg)
- self.module.fail_json(msg=error_msg)
-
- if sds_new_name:
- error_msg = "sds_new_name parameter is not supported " \
- "during creation of a SDS. Try renaming the " \
- "SDS after the creation."
- LOG.info(error_msg)
- self.module.fail_json(msg=error_msg)
-
- self.validate_ip_parameter(sds_ip_list)
-
- create_changed = self.create_sds(protection_domain_id,
- sds_ip_list, sds_ip_state,
- sds_name, rmcache_enabled,
- rmcache_size)
- if create_changed:
- sds_details = self.get_sds_details(sds_name)
- sds_id = sds_details['id']
- msg = "SDS created successfully, fetched SDS details %s"\
- % (str(sds_details))
- LOG.info(msg)
-
- # checking if basic SDS parameters are modified or not
- modify_dict = {}
- if sds_details and state == 'present':
- modify_dict = self.to_modify(sds_details, sds_new_name,
- rfcache_enabled, rmcache_enabled,
- rmcache_size, performance_profile)
- msg = "Parameters to be modified are as follows: %s"\
- % (str(modify_dict))
- LOG.info(msg)
-
- # modify operation
- modify_changed = False
- if modify_dict and state == 'present':
- LOG.info("Modify SDS params.")
- modify_changed = self.modify_sds_attributes(sds_id, modify_dict,
- create_changed)
-
- # get updated SDS details
- sds_details = self.get_sds_details(sds_id=sds_id)
-
- # add IPs to SDS
- # update IP's role for an SDS
- add_ip_changed = False
- update_role_changed = False
- if sds_details and state == 'present' \
- and sds_ip_state == "present-in-sds":
- self.validate_ip_parameter(sds_ip_list)
- ips_to_add, roles_to_update = self.identify_ip_role(
- sds_ip_list, sds_details, sds_ip_state)
- if ips_to_add:
- add_ip_changed = self.add_ip(sds_id, ips_to_add)
- if roles_to_update:
- update_role_changed = self.update_role(sds_id,
- roles_to_update)
-
- # remove IPs from SDS
- remove_ip_changed = False
- if sds_details and state == 'present' \
- and sds_ip_state == "absent-in-sds":
- self.validate_ip_parameter(sds_ip_list)
- ips_to_remove = self.identify_ip_role(sds_ip_list, sds_details,
- sds_ip_state)
- if ips_to_remove:
- remove_ip_changed = self.remove_ip(sds_id, ips_to_remove)
-
- # delete operation
- delete_changed = False
- if sds_details and state == 'absent':
- delete_changed = self.delete_sds(sds_id)
-
- if create_changed or modify_changed or add_ip_changed \
- or update_role_changed or remove_ip_changed or delete_changed:
- changed = True
-
- # Returning the updated SDS details
- if state == 'present':
- sds_details = self.show_output(sds_id)
- result['sds_details'] = sds_details
- result['changed'] = changed
- self.module.exit_json(**result)
-
def show_output(self, sds_id):
"""Show SDS details
:param sds_id: ID of the SDS
@@ -1115,6 +996,14 @@ class PowerFlexSDS(object):
rmcache_size_mb = sds_details[0]['rmcacheSizeInKb'] / 1024
sds_details[0]['rmcacheSizeInMb'] = int(rmcache_size_mb)
+ # Append fault set name
+ if 'faultSetId' in sds_details[0] \
+ and sds_details[0]['faultSetId']:
+ fs_details = self.get_fault_set(
+ fault_set_id=sds_details[0]['faultSetId'],
+ protection_domain_id=sds_details[0]['protectionDomainId'])
+ sds_details[0]['faultSetName'] = fs_details['name']
+
return sds_details[0]
except Exception as e:
@@ -1123,6 +1012,15 @@ class PowerFlexSDS(object):
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
+ def validate_parameters(self, sds_params):
+ params = [sds_params['sds_name'], sds_params['sds_new_name']]
+ for param in params:
+ if param is not None and len(param.strip()) == 0:
+ error_msg = "Provide valid value for name for the " \
+ "creation/modification of the SDS."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
def get_powerflex_sds_parameters():
"""This method provide parameter required for the SDS module on
@@ -1145,15 +1043,137 @@ def get_powerflex_sds_parameters():
rmcache_enabled=dict(type='bool'),
rmcache_size=dict(type='int'),
performance_profile=dict(choices=['Compact', 'HighPerformance']),
+ fault_set_name=dict(),
+ fault_set_id=dict(),
state=dict(required=True, type='str', choices=['present', 'absent'])
)
+class SDSExitHandler():
+ def handle(self, sds_obj, sds_details):
+ if sds_details:
+ sds_obj.result["sds_details"] = sds_obj.show_output(sds_id=sds_details['id'])
+ else:
+ sds_obj.result["sds_details"] = None
+ sds_obj.module.exit_json(**sds_obj.result)
+
+
+class SDSDeleteHandler():
+ def handle(self, sds_obj, sds_params, sds_details):
+ if sds_params['state'] == 'absent' and sds_details:
+ sds_details = sds_obj.delete_sds(sds_details['id'])
+ sds_obj.result['changed'] = True
+
+ SDSExitHandler().handle(sds_obj, sds_details)
+
+
+class SDSRemoveIPHandler():
+ def handle(self, sds_obj, sds_params, sds_details, sds_ip_list):
+ if sds_params['state'] == 'present' and sds_details:
+ # remove IPs from SDS
+ remove_ip_changed = False
+ if sds_params['sds_ip_state'] == "absent-in-sds":
+ sds_obj.validate_ip_parameter(sds_ip_list)
+ ips_to_remove = sds_obj.identify_ip_role_remove(sds_ip_list, sds_details,
+ sds_params['sds_ip_state'])
+ if ips_to_remove:
+ remove_ip_changed = sds_obj.remove_ip(sds_details['id'], ips_to_remove)
+
+ if remove_ip_changed:
+ sds_obj.result['changed'] = True
+
+ SDSDeleteHandler().handle(sds_obj, sds_params, sds_details)
+
+
+class SDSAddIPHandler():
+ def handle(self, sds_obj, sds_params, sds_details, sds_ip_list):
+ if sds_params['state'] == 'present' and sds_details:
+ # add IPs to SDS
+ # update IP's role for an SDS
+ add_ip_changed = False
+ update_role_changed = False
+ if sds_params['sds_ip_state'] == "present-in-sds":
+ sds_obj.validate_ip_parameter(sds_ip_list)
+ ips_to_add, roles_to_update = sds_obj.identify_ip_role_add(
+ sds_ip_list, sds_details, sds_params['sds_ip_state'])
+ if ips_to_add:
+ add_ip_changed = sds_obj.add_ip(sds_details['id'], ips_to_add)
+ if roles_to_update:
+ update_role_changed = sds_obj.update_role(sds_details['id'],
+ roles_to_update)
+
+ if add_ip_changed or update_role_changed:
+ sds_obj.result['changed'] = True
+
+ SDSRemoveIPHandler().handle(sds_obj, sds_params, sds_details, sds_ip_list)
+
+
+class SDSModifyHandler():
+ def handle(self, sds_obj, sds_params, sds_details, create_flag, sds_ip_list):
+ if sds_params['state'] == 'present' and sds_details:
+ modify_dict = sds_obj.to_modify(sds_details=sds_details,
+ sds_new_name=sds_params['sds_new_name'],
+ rfcache_enabled=sds_params['rfcache_enabled'],
+ rmcache_enabled=sds_params['rmcache_enabled'],
+ rmcache_size=sds_params['rmcache_size'],
+ performance_profile=sds_params['performance_profile'])
+ if modify_dict:
+ sds_details = sds_obj.modify_sds_attributes(sds_id=sds_details['id'],
+ modify_dict=modify_dict,
+ create_flag=create_flag)
+ sds_obj.result['changed'] = True
+
+ SDSAddIPHandler().handle(sds_obj, sds_params, sds_details, sds_ip_list)
+
+
+class SDSCreateHandler():
+ def handle(self, sds_obj, sds_params, sds_details, protection_domain_id, fault_set_id):
+ create_flag = False
+ sds_ip_list = copy.deepcopy(sds_params['sds_ip_list'])
+ if sds_params['state'] == 'present' and not sds_details:
+ sds_details = sds_obj.create_sds(sds_name=sds_params['sds_name'],
+ sds_id=sds_params['sds_id'],
+ sds_new_name=sds_params['sds_new_name'],
+ protection_domain_id=protection_domain_id,
+ sds_ip_list=sds_ip_list,
+ sds_ip_state=sds_params['sds_ip_state'],
+ rmcache_enabled=sds_params['rmcache_enabled'],
+ rmcache_size=sds_params['rmcache_size'],
+ fault_set_id=fault_set_id)
+ sds_obj.result['changed'] = True
+ create_flag = True
+
+ SDSModifyHandler().handle(sds_obj, sds_params, sds_details, create_flag, sds_ip_list)
+
+
+class SDSHandler():
+ def handle(self, sds_obj, sds_params):
+ sds_details = sds_obj.get_sds_details(sds_params['sds_name'], sds_params['sds_id'])
+ sds_obj.validate_parameters(sds_params=sds_params)
+ protection_domain_id = None
+ if sds_params['protection_domain_id'] or sds_params['protection_domain_name']:
+ protection_domain_id = sds_obj.get_protection_domain(
+ protection_domain_id=sds_params['protection_domain_id'],
+ protection_domain_name=sds_params['protection_domain_name'])['id']
+ fault_set_id = None
+ if sds_params['fault_set_name'] or sds_params['fault_set_id']:
+ fault_set_details = sds_obj.get_fault_set(fault_set_name=sds_params['fault_set_name'],
+ fault_set_id=sds_params['fault_set_id'],
+ protection_domain_id=protection_domain_id)
+ if fault_set_details is None:
+ error_msg = "The specified Fault set is not in the specified Protection Domain."
+ LOG.error(error_msg)
+ sds_obj.module.fail_json(msg=error_msg)
+ else:
+ fault_set_id = fault_set_details['id']
+ SDSCreateHandler().handle(sds_obj, sds_params, sds_details, protection_domain_id, fault_set_id)
+
+
def main():
- """ Create PowerFlex SDS object and perform actions on it
- based on user input from playbook"""
+ """ Create PowerFlex SDS object and perform action on it
+ based on user input from playbook."""
obj = PowerFlexSDS()
- obj.perform_module_operation()
+ SDSHandler().handle(obj, obj.module.params)
if __name__ == '__main__':
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py
index 69caea075..0cc41c50e 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py
@@ -150,7 +150,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
snapshot_name: "ansible_snapshot"
vol_name: "ansible_volume"
- read_only: False
+ read_only: false
desired_retention: 2
state: "present"
@@ -171,9 +171,9 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
snapshot_id: "fe6cb28200000007"
sdc:
- - sdc_ip: "198.10.xxx.xxx"
- - sdc_id: "663ac0d200000001"
- allow_multiple_mappings: True
+ - sdc_ip: "198.10.xxx.xxx"
+ - sdc_id: "663ac0d200000001"
+ allow_multiple_mappings: true
sdc_state: "mapped"
state: "present"
@@ -185,13 +185,13 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
snapshot_id: "fe6cb28200000007"
sdc:
- - sdc_ip: "198.10.xxx.xxx"
- iops_limit: 11
- bandwidth_limit: 4096
- - sdc_id: "663ac0d200000001"
- iops_limit: 20
- bandwidth_limit: 2048
- allow_multiple_mappings: True
+ - sdc_ip: "198.10.xxx.xxx"
+ iops_limit: 11
+ bandwidth_limit: 4096
+ - sdc_id: "663ac0d200000001"
+ iops_limit: 20
+ bandwidth_limit: 2048
+ allow_multiple_mappings: true
sdc_state: "mapped"
state: "present"
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/snapshot_policy.py b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot_policy.py
new file mode 100644
index 000000000..af2084e55
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot_policy.py
@@ -0,0 +1,828 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2023, Dell Technologies
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+""" Ansible module for managing snapshot policies on Dell Technologies (Dell) PowerFlex"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: snapshot_policy
+version_added: '1.7.0'
+short_description: Manage snapshot policies on Dell PowerFlex
+description:
+- Managing snapshot policies on PowerFlex storage system includes
+ creating, getting details, modifying attributes, adding a source volume,
+ removing a source volume and deleting a snapshot policy.
+author:
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
+extends_documentation_fragment:
+ - dellemc.powerflex.powerflex
+options:
+ snapshot_policy_name:
+ description:
+ - The name of the snapshot policy.
+ - It is unique across the PowerFlex array.
+ - Mutually exclusive with I(snapshot_policy_id).
+ type: str
+ snapshot_policy_id:
+ description:
+ - The unique identifier of the snapshot policy.
+    - Except for the create operation, all other operations can be performed
+ using I(snapshot_policy_id).
+ - Mutually exclusive with I(snapshot_policy_name).
+ type: str
+ auto_snapshot_creation_cadence:
+ description:
+ - The auto snapshot creation cadence of the snapshot policy.
+ type: dict
+ suboptions:
+ time:
+ description:
+ - The time between creation of two snapshots.
+ type: int
+ unit:
+ description:
+ - The unit of the auto snapshot creation cadence.
+ type: str
+ choices: ["Minute", "Hour", "Day", "Week"]
+ default: "Minute"
+ num_of_retained_snapshots_per_level:
+ description:
+ - Number of retained snapshots per level.
+ type: list
+ elements: int
+ new_name:
+ description:
+ - New name of the snapshot policy.
+ type: str
+ access_mode:
+ description:
+ - Access mode of the snapshot policy.
+ choices: ['READ_WRITE', 'READ_ONLY']
+ type: str
+ secure_snapshots:
+ description:
+ - Whether to secure snapshots or not.
+ - Used only in the create operation.
+ type: bool
+ source_volume:
+ description:
+ - The source volume details to be added or removed.
+ type: list
+ elements: dict
+ suboptions:
+ id:
+ description:
+ - The unique identifier of the source volume
+ to be added or removed.
+ - Mutually exclusive with I(name).
+ type: str
+ name:
+ description:
+ - The name of the source volume to be added or removed.
+ - Mutually exclusive with I(id).
+ type: str
+ auto_snap_removal_action:
+ description:
+ - Ways to handle the snapshots created by the policy (auto snapshots).
+    - Must be provided when I(state) is set to C(absent).
+ choices: ['Remove', 'Detach']
+ type: str
+ detach_locked_auto_snapshots:
+ description:
+ - Whether to detach the locked auto snapshots during removal of source volume.
+ type: bool
+ state:
+ description:
+ - The state of the source volume.
+ - When C(present), source volume will be added to the snapshot policy.
+ - When C(absent), source volume will be removed from the snapshot policy.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ pause:
+ description:
+ - Whether to pause or resume the snapshot policy.
+ type: bool
+ state:
+ description:
+ - State of the snapshot policy.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+notes:
+ - The I(check_mode) is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ access_mode: "READ_WRITE"
+ secure_snapshots: false
+ auto_snapshot_creation_cadence:
+ time: 1
+ unit: "Hour"
+ num_of_retained_snapshots_per_level:
+ - 20
+ state: "present"
+
+- name: Get snapshot policy details using name
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+
+- name: Get snapshot policy details using id
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_id: "snapshot_policy_id_1"
+
+- name: Modify a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ auto_snapshot_creation_cadence:
+ time: 2
+ unit: "Hour"
+ num_of_retained_snapshots_per_level:
+ - 40
+
+- name: Rename a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ new_name: "snapshot_policy_name_1_new"
+
+- name: Add source volume
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ source_volume:
+ - name: "source_volume_name_1"
+ - id: "source_volume_id_2"
+ state: "present"
+
+- name: Remove source volume
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "{{snapshot_policy_name}}"
+ source_volume:
+ - name: "source_volume_name_1"
+ auto_snap_removal_action: 'Remove'
+ state: "absent"
+ - id: "source_volume_id_2"
+ auto_snap_removal_action: 'Remove'
+ detach_locked_auto_snapshots: true
+ state: "absent"
+
+- name: Pause a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "{{snapshot_policy_name}}"
+ pause: true
+
+- name: Resume a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "{{snapshot_policy_name}}"
+ pause: false
+
+- name: Delete a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: 'false'
+snapshot_policy_details:
+ description: Details of the snapshot policy.
+ returned: When snapshot policy exists
+ type: dict
+ contains:
+ autoSnapshotCreationCadenceInMin:
+ description: The snapshot rule of the snapshot policy.
+ type: int
+ id:
+ description: The ID of the snapshot policy.
+ type: str
+ lastAutoSnapshotCreationFailureReason:
+      description: The reason for the failure of the last auto snapshot creation.
+ type: str
+ name:
+ description: Name of the snapshot policy.
+ type: str
+ lastAutoSnapshotFailureInFirstLevel:
+ description: Whether the last auto snapshot in first level failed.
+ type: bool
+ maxVTreeAutoSnapshots:
+ description: Maximum number of VTree auto snapshots.
+ type: int
+ nextAutoSnapshotCreationTime:
+ description: The time of creation of the next auto snapshot.
+ type: int
+ numOfAutoSnapshots:
+ description: Number of auto snapshots.
+ type: int
+ numOfCreationFailures:
+ description: Number of creation failures.
+ type: int
+ numOfExpiredButLockedSnapshots:
+ description: Number of expired but locked snapshots.
+ type: int
+ numOfLockedSnapshots:
+ description: Number of locked snapshots.
+ type: int
+ numOfRetainedSnapshotsPerLevel:
+      description: Number of snapshots retained per level.
+ type: list
+ numOfSourceVolumes:
+ description: Number of source volumes.
+ type: int
+ secureSnapshots:
+ description: Whether the snapshots are secured.
+ type: bool
+ snapshotAccessMode:
+ description: Access mode of the snapshots.
+ type: str
+ snapshotPolicyState:
+ description: State of the snapshot policy.
+ type: str
+ systemId:
+ description: Unique identifier of the PowerFlex system.
+ type: str
+ timeOfLastAutoSnapshot:
+ description: Time of the last auto snapshot creation.
+ type: str
+ timeOfLastAutoSnapshotCreationFailure:
+ description: Time of the failure of the last auto snapshot creation.
+ type: str
+ statistics:
+ description: Statistics details of the snapshot policy.
+ type: dict
+ contains:
+ autoSnapshotVolIds:
+ description: Volume Ids of all the auto snapshots.
+ type: list
+ expiredButLockedSnapshotsIds:
+ description: Ids of expired but locked snapshots.
+ type: list
+ numOfAutoSnapshots:
+ description: Number of auto snapshots.
+ type: int
+ numOfExpiredButLockedSnapshots:
+ description: Number of expired but locked snapshots.
+ type: int
+ numOfSrcVols:
+ description: Number of source volumes.
+ type: int
+ srcVolIds:
+ description: Ids of the source volumes.
+ type: list
+
+ sample: {
+ "autoSnapshotCreationCadenceInMin": 120,
+ "id": "15ae842800000004",
+ "lastAutoSnapshotCreationFailureReason": "NR",
+ "lastAutoSnapshotFailureInFirstLevel": false,
+ "links": [
+ {
+ "href": "/api/instances/SnapshotPolicy::15ae842800000004",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/SnapshotPolicy::15ae842800000004/relationships/Statistics",
+ "rel": "/api/SnapshotPolicy/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/SnapshotPolicy::15ae842800000004/relationships/SourceVolume",
+ "rel": "/api/SnapshotPolicy/relationship/SourceVolume"
+ },
+ {
+ "href": "/api/instances/SnapshotPolicy::15ae842800000004/relationships/AutoSnapshotVolume",
+ "rel": "/api/SnapshotPolicy/relationship/AutoSnapshotVolume"
+ },
+ {
+ "href": "/api/instances/System::0e7a082862fedf0f",
+ "rel": "/api/parent/relationship/systemId"
+ }
+ ],
+ "maxVTreeAutoSnapshots": 40,
+ "name": "Sample_snapshot_policy_1",
+ "nextAutoSnapshotCreationTime": 1683709201,
+ "numOfAutoSnapshots": 0,
+ "numOfCreationFailures": 0,
+ "numOfExpiredButLockedSnapshots": 0,
+ "numOfLockedSnapshots": 0,
+ "numOfRetainedSnapshotsPerLevel": [
+ 40
+ ],
+ "numOfSourceVolumes": 0,
+ "secureSnapshots": false,
+ "snapshotAccessMode": "ReadWrite",
+ "snapshotPolicyState": "Active",
+ "statistics": {
+ "autoSnapshotVolIds": [],
+ "expiredButLockedSnapshotsIds": [],
+ "numOfAutoSnapshots": 0,
+ "numOfExpiredButLockedSnapshots": 0,
+ "numOfSrcVols": 0,
+ "srcVolIds": []
+ },
+ "systemId": "0e7a082862fedf0f",
+ "timeOfLastAutoSnapshot": 0,
+ "timeOfLastAutoSnapshotCreationFailure": 0
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('snapshot_policy')
+
+
+class PowerFlexSnapshotPolicy(object):
+ """Class with snapshot policies operations"""
+
+ def __init__(self):
+ """ Define all parameters required by this module"""
+ self.module_params = utils.get_powerflex_gateway_host_parameters()
+ self.module_params.update(get_powerflex_snapshot_policy_parameters())
+
+ mut_ex_args = [['snapshot_policy_name', 'snapshot_policy_id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=True,
+ mutually_exclusive=mut_ex_args)
+
+ utils.ensure_required_libs(self.module)
+
+ self.result = dict(
+ changed=False,
+ snapshot_policy_details={}
+ )
+
+ try:
+ self.powerflex_conn = utils.get_powerflex_gateway_host_connection(
+ self.module.params)
+ LOG.info("Got the PowerFlex system connection object instance")
+ except Exception as e:
+ LOG.error(str(e))
+ self.module.fail_json(msg=str(e))
+
+ def get_snapshot_policy(self, snap_pol_id=None, snap_pol_name=None):
+ """Get snapshot policy details
+ :param snap_pol_name: Name of the snapshot policy.
+ :param snap_pol_id: ID of the snapshot policy.
+ :return: snapshot policy details
+ """
+ try:
+ snap_pol_details = None
+ if snap_pol_id:
+ snap_pol_details = self.powerflex_conn.snapshot_policy.get(
+ filter_fields={'id': snap_pol_id})
+
+ if snap_pol_name:
+ snap_pol_details = self.powerflex_conn.snapshot_policy.get(
+ filter_fields={'name': snap_pol_name})
+
+ if not snap_pol_details:
+ msg = "Unable to find the snapshot policy."
+ LOG.info(msg)
+ return None
+
+ # Append statistics
+ statistics = self.powerflex_conn.snapshot_policy.get_statistics(snap_pol_details[0]['id'])
+ snap_pol_details[0]['statistics'] = statistics if statistics else {}
+ return snap_pol_details[0]
+
+ except Exception as e:
+ errormsg = f'Failed to get the snapshot policy with error {str(e)}'
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def create_snapshot_policy(self, auto_snapshot_creation_cadence_in_min, num_of_retained_snapshots_per_level,
+ access_mode, secure_snapshots, snapshot_policy_name=None):
+ """Create snapshot_policy
+ :param auto_snapshot_creation_cadence_in_min: The auto snapshot creation cadence of the snapshot policy.
+ :param num_of_retained_snapshots_per_level: Number of retained snapshots per level.
+ :param access_mode: Access mode of the snapshot policy.
+ :param secure_snapshots: Whether to secure snapshots or not.
+ :param snapshot_policy_name: Name of the snapshot policy.
+ :return: Id of the snapshot policy, if created.
+ """
+ try:
+ if not self.module.check_mode:
+ policy_id = self.powerflex_conn.snapshot_policy.create(
+ auto_snap_creation_cadence_in_min=auto_snapshot_creation_cadence_in_min,
+ retained_snaps_per_level=num_of_retained_snapshots_per_level, name=snapshot_policy_name,
+ snapshotAccessMode=access_mode, secureSnapshots=secure_snapshots)
+ return policy_id
+
+ except Exception as e:
+ errormsg = f'Creation of snapshot policy failed with error {str(e)}'
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def delete_snapshot_policy(self, snap_pol_id):
+ """Delete snapshot policy
+ :param snap_pol_id: The unique identifier of the snapshot policy.
+ :return: Details of the snapshot policy.
+ """
+
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.snapshot_policy.delete(snap_pol_id)
+ return self.get_snapshot_policy(snap_pol_id=snap_pol_id)
+
+ except Exception as e:
+ errormsg = (f'Deletion of snapshot policy {snap_pol_id} '
+ f'failed with error {str(e)}')
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_volume(self, vol_name=None, vol_id=None):
+ """Get volume details
+ :param vol_name: Name of the volume
+ :param vol_id: ID of the volume
+        :return: Details of the volume, if it exists.
+ """
+
+ id_or_name = vol_id if vol_id else vol_name
+
+ try:
+ if vol_name:
+ volume_details = self.powerflex_conn.volume.get(
+ filter_fields={'name': vol_name})
+ else:
+ volume_details = self.powerflex_conn.volume.get(
+ filter_fields={'id': vol_id})
+
+ if len(volume_details) == 0:
+ error_msg = f"Volume with identifier {id_or_name} not found"
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ # Append snapshot policy name and id
+ if volume_details[0]['snplIdOfSourceVolume'] is not None:
+ snap_policy_id = volume_details[0]['snplIdOfSourceVolume']
+ volume_details[0]['snapshotPolicyId'] = snap_policy_id
+ volume_details[0]['snapshotPolicyName'] = \
+ self.get_snapshot_policy(snap_policy_id)['name']
+
+ return volume_details[0]
+
+ except Exception as e:
+ error_msg = (f"Failed to get the volume {id_or_name}"
+ f" with error {str(e)}")
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def manage_source_volume(self, snap_pol_details, vol_details, source_volume_element):
+ """Adding or removing a source volume
+        :param snap_pol_details: Details of the snapshot policy.
+ :param vol_details: Details of the volume.
+ :param source_volume_element: The index of the source volume in the
+ list of volumes to be added/removed.
+ :return: Boolean indicating whether volume is added/removed.
+ """
+ try:
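+            # A volume is attached to this policy when its snplIdOfSourceVolume
+            # equals the policy id; add it only when it is not attached yet and
+            # remove it only when it is currently attached, so repeat runs make
+            # no further changes.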
+ if self.module.params['source_volume'][source_volume_element]['state'] == 'present' and \
+ vol_details['snplIdOfSourceVolume'] != snap_pol_details['id']:
+ if not self.module.check_mode:
+ snap_pol_details = self.powerflex_conn.snapshot_policy.add_source_volume(
+ snapshot_policy_id=snap_pol_details['id'],
+ volume_id=vol_details['id'])
+ LOG.info("Source volume successfully added")
+ return True
+
+ elif self.module.params['source_volume'][source_volume_element]['state'] == 'absent' and \
+ vol_details['snplIdOfSourceVolume'] == snap_pol_details['id']:
+ if not self.module.check_mode:
+ snap_pol_details = self.powerflex_conn.snapshot_policy.remove_source_volume(
+ snapshot_policy_id=snap_pol_details['id'],
+ volume_id=vol_details['id'],
+ auto_snap_removal_action=self.module.params['source_volume'][source_volume_element]['auto_snap_removal_action'],
+ detach_locked_auto_snaps=self.module.params['source_volume'][source_volume_element]['detach_locked_auto_snapshots'])
+ LOG.info("Source volume successfully removed")
+ return True
+
+ except Exception as e:
+ error_msg = f"Failed to manage the source volume {vol_details['id']} with error {str(e)}"
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def pause_snapshot_policy(self, snap_pol_details):
+ """Pausing or resuming a snapshot policy.
+        :param snap_pol_details: Details of the snapshot policy.
+        :return: Boolean indicating whether the snapshot policy was paused or resumed.
+ """
+ try:
+ if self.module.params['pause'] and \
+ snap_pol_details['snapshotPolicyState'] != "Paused":
+ if not self.module.check_mode:
+ self.powerflex_conn.snapshot_policy.pause(
+ snapshot_policy_id=snap_pol_details['id'])
+ LOG.info("Snapshot policy successfully paused.")
+ return True
+
+ elif not self.module.params['pause'] and \
+ snap_pol_details['snapshotPolicyState'] == "Paused":
+ if not self.module.check_mode:
+ self.powerflex_conn.snapshot_policy.resume(
+ snapshot_policy_id=snap_pol_details['id'])
+ LOG.info("Snapshot policy successfully resumed.")
+ return True
+
+ except Exception as e:
+ error_msg = f"Failed to pause/resume {snap_pol_details['id']} with error {str(e)}"
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def to_modify(self, snap_pol_details, auto_snapshot_creation_cadence_in_min, num_of_retained_snapshots_per_level, new_name):
+ """Whether to modify the snapshot policy or not
+ :param snap_pol_details: Details of the snapshot policy.
+ :param auto_snapshot_creation_cadence_in_min: Snapshot rule of the policy.
+ :param num_of_retained_snapshots_per_level: Retention rule of the policy.
+ :param new_name: The new name of the snapshot policy.
+ :return: Dictionary containing the attributes of
+ snapshot policy which are to be updated
+ """
+ modify_dict = {}
+
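+        # Compare each requested attribute with the current policy details and
+        # record only the values that differ; an empty dict means no update
+        # call is needed.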
+        if self.module.params['auto_snapshot_creation_cadence'] is not None and \
+ snap_pol_details['autoSnapshotCreationCadenceInMin'] != auto_snapshot_creation_cadence_in_min:
+ modify_dict['auto_snapshot_creation_cadence_in_min'] = auto_snapshot_creation_cadence_in_min
+
+ if num_of_retained_snapshots_per_level is not None and \
+ snap_pol_details['numOfRetainedSnapshotsPerLevel'] != num_of_retained_snapshots_per_level:
+ modify_dict['num_of_retained_snapshots_per_level'] = num_of_retained_snapshots_per_level
+
+ if new_name is not None:
+ if len(new_name.strip()) == 0:
+ self.module.fail_json(
+ msg="Provide valid volume name.")
+ if new_name != snap_pol_details['name']:
+ modify_dict['new_name'] = new_name
+
+ return modify_dict
+
+ def modify_snapshot_policy(self, snap_pol_details, modify_dict):
+ """
+ Modify the snapshot policy attributes
+ :param snap_pol_details: Details of the snapshot policy
+ :param modify_dict: Dictionary containing the attributes of
+ snapshot policy which are to be updated
+ :return: True, if the operation is successful
+ """
+ try:
+ msg = (f"Dictionary containing attributes which are to be"
+ f" updated is {str(modify_dict)}.")
+ LOG.info(msg)
+ if not self.module.check_mode:
+ if 'new_name' in modify_dict:
+ self.powerflex_conn.snapshot_policy.rename(snap_pol_details['id'],
+ modify_dict['new_name'])
+ msg = (f"The name of the volume is updated"
+ f" to {modify_dict['new_name']} sucessfully.")
+ LOG.info(msg)
+
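+                # The modify call is always issued with both the cadence and the
+                # retention rule: whichever one was not requested is re-sent with
+                # its current value from the existing policy details.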
+ if 'auto_snapshot_creation_cadence_in_min' in modify_dict and \
+ 'num_of_retained_snapshots_per_level' not in modify_dict:
+ self.powerflex_conn.snapshot_policy.modify(
+ snapshot_policy_id=snap_pol_details['id'],
+ auto_snap_creation_cadence_in_min=modify_dict['auto_snapshot_creation_cadence_in_min'],
+ retained_snaps_per_level=snap_pol_details['numOfRetainedSnapshotsPerLevel'])
+ msg = f"The snapshot rule is updated to {modify_dict['auto_snapshot_creation_cadence_in_min']}"
+ LOG.info(msg)
+
+ elif 'auto_snapshot_creation_cadence_in_min' not in modify_dict and 'num_of_retained_snapshots_per_level' in modify_dict:
+ self.powerflex_conn.snapshot_policy.modify(
+ snapshot_policy_id=snap_pol_details['id'],
+ auto_snap_creation_cadence_in_min=snap_pol_details['autoSnapshotCreationCadenceInMin'],
+ retained_snaps_per_level=modify_dict['num_of_retained_snapshots_per_level'])
+ msg = f"The retention rule is updated to {modify_dict['num_of_retained_snapshots_per_level']}"
+ LOG.info(msg)
+
+ elif 'auto_snapshot_creation_cadence_in_min' in modify_dict and 'num_of_retained_snapshots_per_level' in modify_dict:
+ self.powerflex_conn.snapshot_policy.modify(
+ snapshot_policy_id=snap_pol_details['id'],
+ auto_snap_creation_cadence_in_min=modify_dict['auto_snapshot_creation_cadence_in_min'],
+ retained_snaps_per_level=modify_dict['num_of_retained_snapshots_per_level'])
+ msg = (f"The snapshot rule is updated to {modify_dict['auto_snapshot_creation_cadence_in_min']}"
+ f" and the retention rule is updated to {modify_dict['num_of_retained_snapshots_per_level']}")
+ LOG.info(msg)
+
+ return True
+
+ except Exception as e:
+ err_msg = (f"Failed to update the snapshot policy {snap_pol_details['id']}"
+ f" with error {str(e)}")
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+
+def get_access_mode(access_mode):
+ """
+ :param access_mode: Access mode of the snapshot policy
+ :return: The enum for the access mode
+ """
+
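+    # Map the playbook choice to the REST API enum value, for example
+    # "READ_WRITE" -> "ReadWrite"; returns None when access_mode is not supplied.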
+ access_mode_dict = {
+ "READ_WRITE": "ReadWrite",
+ "READ_ONLY": "ReadOnly"
+ }
+ return access_mode_dict.get(access_mode)
+
+
+def get_powerflex_snapshot_policy_parameters():
+ """This method provide parameter required for the snapshot
+ policy module on PowerFlex"""
+ return dict(
+ snapshot_policy_name=dict(), snapshot_policy_id=dict(),
+ new_name=dict(),
+ access_mode=dict(choices=['READ_WRITE', 'READ_ONLY']),
+ secure_snapshots=dict(type='bool'),
+ auto_snapshot_creation_cadence=dict(type='dict', options=dict(
+ time=dict(type='int'),
+ unit=dict(choices=['Minute', 'Hour', 'Day', 'Week'],
+ default='Minute'))),
+ num_of_retained_snapshots_per_level=dict(type='list', elements='int'),
+ source_volume=dict(type='list', elements='dict', options=dict(
+ id=dict(), name=dict(),
+ auto_snap_removal_action=dict(choices=['Remove', 'Detach']),
+ detach_locked_auto_snapshots=dict(type='bool'),
+ state=dict(default='present', choices=['present', 'absent']))),
+ pause=dict(type='bool'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+
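+# Handler chain for the module: SnapshotPolicyHandler (entry point) delegates to
+# SnapshotPolicyCreateHandler -> SnapshotPolicyModifyHandler ->
+# SnapshotPolicySourceVolumeHandler -> SnapshotPolicyPauseHandler ->
+# SnapshotPolicyDeleteHandler -> SnapshotPolicyExitHandler, each checking the
+# requested state before acting and then handing off to the next handler.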
+class SnapshotPolicyCreateHandler():
+ def handle(self, con_object, con_params, snapshot_policy_details, access_mode, auto_snapshot_creation_cadence_in_min):
+ if con_params['state'] == 'present' and not snapshot_policy_details:
+ if con_params['snapshot_policy_id']:
+ con_object.module.fail_json(msg="Creation of snapshot "
+ "policy is allowed "
+ "using snapshot_policy_name only, "
+ "snapshot_policy_id given.")
+
+ snap_pol_id = con_object.create_snapshot_policy(snapshot_policy_name=con_params['snapshot_policy_name'],
+ access_mode=access_mode,
+ secure_snapshots=con_params['secure_snapshots'],
+ auto_snapshot_creation_cadence_in_min=auto_snapshot_creation_cadence_in_min,
+ num_of_retained_snapshots_per_level=con_params['num_of_retained_snapshots_per_level'])
+ con_object.result['changed'] = True
+ if snap_pol_id:
+ snapshot_policy_details = con_object.get_snapshot_policy(snap_pol_name=con_params['snapshot_policy_name'],
+ snap_pol_id=con_params['snapshot_policy_id'])
+
+ msg = (f"snapshot policy created successfully, fetched "
+ f"snapshot_policy details {str(snapshot_policy_details)}")
+ LOG.info(msg)
+
+ SnapshotPolicyModifyHandler().handle(con_object, con_params, snapshot_policy_details,
+ auto_snapshot_creation_cadence_in_min)
+
+
+class SnapshotPolicyModifyHandler():
+ def handle(self, con_object, con_params, snapshot_policy_details, auto_snapshot_creation_cadence_in_min):
+ modify_dict = {}
+ if con_params['state'] == 'present' and snapshot_policy_details:
+ modify_dict = con_object.to_modify(
+ snap_pol_details=snapshot_policy_details, new_name=con_params['new_name'],
+ auto_snapshot_creation_cadence_in_min=auto_snapshot_creation_cadence_in_min,
+ num_of_retained_snapshots_per_level=con_params['num_of_retained_snapshots_per_level'])
+ msg = (f"Parameters to be modified are as"
+ f" follows: {str(modify_dict)}")
+ LOG.info(msg)
+ if modify_dict and con_params['state'] == 'present':
+ con_object.result['changed'] = con_object.modify_snapshot_policy(snap_pol_details=snapshot_policy_details,
+ modify_dict=modify_dict)
+ snapshot_policy_details = con_object.get_snapshot_policy(snap_pol_id=snapshot_policy_details.get("id"))
+ SnapshotPolicySourceVolumeHandler().handle(con_object, con_params, snapshot_policy_details)
+
+
+class SnapshotPolicySourceVolumeHandler():
+ def handle(self, con_object, con_params, snapshot_policy_details):
+ if snapshot_policy_details and con_params['state'] == 'present' and con_params['source_volume'] is not None:
+ for source_volume_element in range(len(con_params['source_volume'])):
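+                # Validate each entry: exactly one of id or name must be given
+                # before the volume is looked up and added to or removed from
+                # the policy.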
+ if not (con_params['source_volume'][source_volume_element]['id'] or
+ con_params['source_volume'][source_volume_element]['name']):
+ con_object.module.fail_json(
+ msg="Either id or name of source volume needs to be "
+ "passed with state of source volume")
+
+ elif con_params['source_volume'][source_volume_element]['id'] and \
+ con_params['source_volume'][source_volume_element]['name']:
+ con_object.module.fail_json(
+ msg="id and name of source volume are mutually exclusive")
+
+ elif con_params['source_volume'][source_volume_element]['id'] or \
+ con_params['source_volume'][source_volume_element]['name']:
+ volume_details = con_object.get_volume(vol_id=con_params['source_volume'][source_volume_element]['id'],
+ vol_name=con_params['source_volume'][source_volume_element]['name'])
+ con_object.result['changed'] = con_object.manage_source_volume(snap_pol_details=snapshot_policy_details,
+ vol_details=volume_details,
+ source_volume_element=source_volume_element)
+ snapshot_policy_details = con_object.get_snapshot_policy(snap_pol_name=con_params['snapshot_policy_name'],
+ snap_pol_id=con_params['snapshot_policy_id'])
+
+ SnapshotPolicyPauseHandler().handle(con_object, con_params, snapshot_policy_details)
+
+
+class SnapshotPolicyPauseHandler():
+ def handle(self, con_object, con_params, snapshot_policy_details):
+ if con_params["state"] == "present" and con_params["pause"] is not None:
+ con_object.result['changed'] = \
+ con_object.pause_snapshot_policy(snap_pol_details=snapshot_policy_details)
+ snapshot_policy_details = \
+ con_object.get_snapshot_policy(snap_pol_name=con_params['snapshot_policy_name'],
+ snap_pol_id=con_params['snapshot_policy_id'])
+ SnapshotPolicyDeleteHandler().handle(con_object, con_params, snapshot_policy_details)
+
+
+class SnapshotPolicyDeleteHandler():
+ def handle(self, con_object, con_params, snapshot_policy_details):
+ if con_params['state'] == 'absent' and snapshot_policy_details:
+ snapshot_policy_details = con_object.delete_snapshot_policy(
+ snap_pol_id=snapshot_policy_details.get("id"))
+ con_object.result['changed'] = True
+ SnapshotPolicyExitHandler().handle(con_object, snapshot_policy_details)
+
+
+class SnapshotPolicyExitHandler():
+ def handle(self, con_object, snapshot_policy_details):
+ con_object.result['snapshot_policy_details'] = snapshot_policy_details
+ con_object.module.exit_json(**con_object.result)
+
+
+class SnapshotPolicyHandler():
+ def handle(self, con_object, con_params):
+ access_mode = get_access_mode(con_params['access_mode'])
+ snapshot_policy_details = con_object.get_snapshot_policy(snap_pol_name=con_params['snapshot_policy_name'],
+ snap_pol_id=con_params['snapshot_policy_id'])
+ auto_snapshot_creation_cadence_in_min = None
+ if snapshot_policy_details:
+ auto_snapshot_creation_cadence_in_min = snapshot_policy_details['autoSnapshotCreationCadenceInMin']
+ msg = f"Fetched the snapshot policy details {str(snapshot_policy_details)}"
+ LOG.info(msg)
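+        # The REST API expresses the cadence as autoSnapshotCreationCadenceInMin,
+        # so convert the requested time/unit pair into minutes (for example,
+        # time=2 with unit "Hour" becomes 120).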
+ if con_params['auto_snapshot_creation_cadence'] is not None:
+ auto_snapshot_creation_cadence_in_min = utils.get_time_minutes(time=con_params['auto_snapshot_creation_cadence']['time'],
+ time_unit=con_params['auto_snapshot_creation_cadence']['unit'])
+ SnapshotPolicyCreateHandler().handle(con_object, con_params, snapshot_policy_details,
+ access_mode, auto_snapshot_creation_cadence_in_min)
+
+
+def main():
+ """ Create PowerFlex snapshot policy object and perform action on it
+ based on user input from playbook"""
+ obj = PowerFlexSnapshotPolicy()
+ SnapshotPolicyHandler().handle(obj, obj.module.params)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py b/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py
index ca343212d..9c8bb1d4a 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py
@@ -88,7 +88,6 @@ notes:
'''
EXAMPLES = r'''
-
- name: Get the details of storage pool by name
dellemc.powerflex.storagepool:
hostname: "{{hostname}}"
@@ -127,8 +126,8 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
storage_pool_name: "ansible_test_pool"
protection_domain_id: "1c957da800000000"
- use_rmcache: True
- use_rfcache: True
+ use_rmcache: true
+ use_rfcache: true
state: "present"
- name: Rename storage pool by id
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/volume.py b/ansible_collections/dellemc/powerflex/plugins/modules/volume.py
index 9c1e1cd29..0fc301831 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/volume.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/volume.py
@@ -126,7 +126,7 @@ options:
description:
- Specifies whether to allow or not allow multiple mappings.
- If the volume is mapped to one SDC then for every new mapping
- I(allow_multiple_mappings) has to be passed as True.
+ I(allow_multiple_mappings) has to be passed as true.
type: bool
sdc:
description:
@@ -175,10 +175,10 @@ options:
type: str
delete_snapshots:
description:
- - If C(True), the volume and all its dependent snapshots will be deleted.
- - If C(False), only the volume will be deleted.
+ - If C(true), the volume and all its dependent snapshots will be deleted.
+ - If C(false), only the volume will be deleted.
- It can be specified only when the I(state) is C(absent).
- - It defaults to C(False), if not specified.
+ - It defaults to C(false), if not specified.
type: bool
state:
description:
@@ -203,7 +203,7 @@ EXAMPLES = r'''
protection_domain_name: "pd_1"
vol_type: "THICK_PROVISIONED"
compression_type: "NORMAL"
- use_rmcache: True
+ use_rmcache: true
size: 16
state: "present"
@@ -215,7 +215,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
vol_name: "sample_volume"
- allow_multiple_mappings: True
+ allow_multiple_mappings: true
sdc:
- sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764"
access_mode: "READ_WRITE"
@@ -251,7 +251,7 @@ EXAMPLES = r'''
iops_limit: 20
- sdc_ip: "198.10.xxx.xxx"
access_mode: "READ_ONLY"
- allow_multiple_mappings: True
+ allow_multiple_mappings: true
sdc_state: "mapped"
state: "present"
@@ -286,7 +286,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
vol_name: "sample_volume"
- delete_snapshots: False
+ delete_snapshots: false
state: "absent"
- name: Delete the Volume and all its dependent snapshots
@@ -297,7 +297,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
vol_name: "sample_volume"
- delete_snapshots: True
+ delete_snapshots: true
state: "absent"
'''