summaryrefslogtreecommitdiffstats
path: root/ansible_collections/dellemc/unity/plugins/modules
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 12:04:41 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 12:04:41 +0000
commit975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
tree89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/dellemc/unity/plugins/modules
parentInitial commit. (diff)
downloadansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.tar.xz
ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.zip
Adding upstream version 7.7.0+dfsg.upstream/7.7.0+dfsg
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/dellemc/unity/plugins/modules')
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/cifsserver.py630
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py1516
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/filesystem.py1906
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py769
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/host.py1026
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/info.py1784
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/interface.py531
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/nasserver.py1142
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/nfs.py1873
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/nfsserver.py494
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/smbshare.py877
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/snapshot.py751
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py1002
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/storagepool.py879
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/tree_quota.py706
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/user_quota.py1012
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/volume.py1277
17 files changed, 18175 insertions, 0 deletions
diff --git a/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py b/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py
new file mode 100644
index 000000000..d40c4f11d
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py
@@ -0,0 +1,630 @@
+#!/usr/bin/python
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing CIFS server on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: cifsserver
+version_added: '1.4.0'
+short_description: Manage CIFS server on Unity storage system
+description:
+- Managing the CIFS server on the Unity storage system includes creating CIFS server, getting CIFS server details
+ and deleting CIFS server.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+
+options:
+ nas_server_name:
+ description:
+ - Name of the NAS server on which CIFS server will be hosted.
+ type: str
+ nas_server_id:
+ description:
+ - ID of the NAS server on which CIFS server will be hosted.
+ type: str
+ netbios_name:
+ description:
+ - The computer name of the SMB server in Windows network.
+ type: str
+ workgroup:
+ description:
+ - Standalone SMB server workgroup.
+ type: str
+ local_password:
+ description:
+ - Standalone SMB server administrator password.
+ type: str
+ domain:
+ description:
+ - The domain name where the SMB server is registered in Active Directory.
+ type: str
+ domain_username:
+ description:
+ - Active Directory domain user name.
+ type: str
+ domain_password:
+ description:
+ - Active Directory domain password.
+ type: str
+ cifs_server_name:
+ description:
+ - The name of the CIFS server.
+ type: str
+ cifs_server_id:
+ description:
+ - The ID of the CIFS server.
+ type: str
+ interfaces:
+ description:
+ - List of file IP interfaces that service CIFS protocol of SMB server.
+ type: list
+ elements: str
+ unjoin_cifs_server_account:
+ description:
+ - Keep SMB server account unjoined in Active Directory after deletion.
+ - C(false) specifies keep SMB server account joined after deletion.
+ - C(true) specifies unjoin SMB server account from Active Directory before deletion.
+ type: bool
+ state:
+ description:
+ - Define whether the CIFS server should exist or not.
+ choices: [absent, present]
+ required: true
+ type: str
+notes:
+- The I(check_mode) is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create CIFS server belonging to Active Directory
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "test_nas1"
+ cifs_server_name: "test_cifs"
+ domain: "ad_domain"
+ domain_username: "domain_username"
+ domain_password: "domain_password"
+ state: "present"
+
+- name: Get CIFS server details using CIFS server ID
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cifs_server_id: "cifs_37"
+ state: "present"
+
+- name: Get CIFS server details using NAS server name
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "test_nas1"
+ state: "present"
+
+- name: Delete CIFS server
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cifs_server_id: "cifs_37"
+ unjoin_cifs_server_account: True
+ domain_username: "domain_username"
+ domain_password: "domain_password"
+ state: "absent"
+
+- name: Create standalone CIFS server
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ netbios_name: "ANSIBLE_CIFS"
+ workgroup: "ansible"
+ local_password: "Password123!"
+ nas_server_name: "test_nas1"
+ state: "present"
+
+- name: Get CIFS server details using netbios name
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ netbios_name: "ANSIBLE_CIFS"
+ state: "present"
+
+- name: Delete standalone CIFS server
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cifs_server_id: "cifs_40"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+
+cifs_server_details:
+ description: Details of the CIFS server.
+ returned: When CIFS server exists
+ type: dict
+ contains:
+ id:
+ description: Unique identifier of the CIFS server instance.
+ type: str
+ name:
+ description: User-specified name for the SMB server.
+ type: str
+ netbios_name:
+ description: Computer Name of the SMB server in windows network.
+ type: str
+ description:
+ description: Description of the SMB server.
+ type: str
+ domain:
+ description: Domain name where SMB server is registered in Active Directory.
+ type: str
+ workgroup:
+ description: Windows network workgroup for the SMB server.
+ type: str
+ is_standalone:
+ description: Indicates whether the SMB server is standalone.
+ type: bool
+ nasServer:
+ description: Information about the NAS server in the storage system.
+ type: dict
+ contains:
+ UnityNasServer:
+ description: Information about the NAS server in the storage system.
+ type: dict
+ contains:
+ id:
+ description: Unique identifier of the NAS server instance.
+ type: str
+ file_interfaces:
+ description: The file interfaces associated with the NAS server.
+ type: dict
+ contains:
+ UnityFileInterfaceList:
+ description: List of file interfaces associated with the NAS server.
+ type: list
+ contains:
+ UnityFileInterface:
+ description: Details of file interface associated with the NAS server.
+ type: dict
+ contains:
+ id:
+ description: Unique identifier of the file interface.
+ type: str
+ smb_multi_channel_supported:
+ description: Indicates whether the SMB 3.0+ multichannel feature is supported.
+ type: bool
+ smb_protocol_versions:
+ description: Supported SMB protocols, such as 1.0, 2.0, 2.1, 3.0, and so on.
+ type: list
+ smbca_supported:
+ description: Indicates whether the SMB server supports continuous availability.
+ type: bool
+ sample: {
+ "description": null,
+ "domain": "xxx.xxx.xxx.com",
+ "existed": true,
+ "file_interfaces": {
+ "UnityFileInterfaceList": [
+ {
+ "UnityFileInterface": {
+ "hash": -9223363258905013637,
+ "id": "if_43"
+ }
+ }
+ ]
+ },
+ "hash": -9223363258905010379,
+ "health": {
+ "UnityHealth": {
+ "hash": 8777949765559
+ }
+ },
+ "id": "cifs_40",
+ "is_standalone": false,
+ "last_used_organizational_unit": "ou=Computers,ou=Dell NAS servers",
+ "name": "ansible_cifs",
+ "nas_server": {
+ "UnityNasServer": {
+ "hash": 8777949765531,
+ "id": "nas_18"
+ }
+ },
+ "netbios_name": "ANSIBLE_CIFS",
+ "smb_multi_channel_supported": true,
+ "smb_protocol_versions": [
+ "1.0",
+ "2.0",
+ "2.1",
+ "3.0"
+ ],
+ "smbca_supported": true,
+ "workgroup": null
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import utils
+
# Module-scoped logger shared by all operations in this module.
LOG = utils.get_logger('cifsserver')


# Client identification string sent to Unisphere with every request.
application_type = "Ansible/1.6.0"
+
+
class CIFSServer(object):
    """Class with CIFS server operations.

    Wraps create/get/delete of a Unity CIFS (SMB) server; modification of an
    existing server is detected but not supported (the module fails instead).
    """

    def __init__(self):
        """Define all parameters required by this module"""
        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_cifs_server_parameters())

        # cifs_server_id uniquely identifies the server, so it may not be
        # combined with a name-based lookup; NAS server is addressed by
        # either name or id, never both.
        mutually_exclusive = [['nas_server_name', 'nas_server_id'], ['cifs_server_id', 'cifs_server_name'],
                              ['cifs_server_id', 'netbios_name']]
        required_one_of = [['cifs_server_id', 'cifs_server_name', 'netbios_name', 'nas_server_name', 'nas_server_id']]

        # initialize the Ansible module
        self.module = AnsibleModule(
            argument_spec=self.module_params,
            supports_check_mode=True,
            mutually_exclusive=mutually_exclusive,
            required_one_of=required_one_of
        )
        utils.ensure_required_libs(self.module)

        self.unity_conn = utils.get_unity_unisphere_connection(
            self.module.params, application_type)
        LOG.info('Check Mode Flag %s', self.module.check_mode)

    def get_details(self, cifs_server_id=None, cifs_server_name=None, netbios_name=None, nas_server_id=None):
        """Get CIFS server details.
        :param: cifs_server_id: The ID of the CIFS server
        :param: cifs_server_name: The name of the CIFS server
        :param: netbios_name: Name of the SMB server in windows network
        :param: nas_server_id: The ID of the NAS server
        :return: Dict containing CIFS server details if exists
        """

        LOG.info("Getting CIFS server details")
        id_or_name = get_id_name(cifs_server_id, cifs_server_name, netbios_name, nas_server_id)

        try:
            # Lookups by id/name return a single resource object;
            # lookups by netbios_name/nas_server return a list-like result,
            # hence the len() check and the different post-processing helper.
            if cifs_server_id:
                cifs_server_details = self.unity_conn.get_cifs_server(_id=cifs_server_id)
                return process_response(cifs_server_details)

            if cifs_server_name:
                cifs_server_details = self.unity_conn.get_cifs_server(name=cifs_server_name)
                return process_response(cifs_server_details)

            if netbios_name:
                cifs_server_details = self.unity_conn.get_cifs_server(netbios_name=netbios_name)
                if len(cifs_server_details) > 0:
                    return process_dict(cifs_server_details._get_properties())

            if nas_server_id:
                cifs_server_details = self.unity_conn.get_cifs_server(nas_server=nas_server_id)
                if len(cifs_server_details) > 0:
                    return process_dict(cifs_server_details._get_properties())
            return None
        except utils.HttpError as e:
            if e.http_status == 401:
                msg = "Failed to get CIFS server: %s due to incorrect " \
                      "username/password error: %s" % (id_or_name, str(e))
            else:
                msg = "Failed to get CIFS server: %s with error: %s" % (id_or_name, str(e))
        except utils.UnityResourceNotFoundError:
            # "Not found" is not an error for this module: absence simply
            # means there is nothing to report (or delete).
            msg = "CIFS server with ID %s not found" % cifs_server_id
            LOG.info(msg)
            return None
        except utils.StoropsConnectTimeoutError as e:
            msg = "Failed to get CIFS server: %s with error: %s. Please check unispherehost IP: %s" % (
                id_or_name, str(e), self.module.params['unispherehost'])
        except Exception as e:
            msg = "Failed to get details of CIFS server: %s with error: %s" % (id_or_name, str(e))
        # Reached only when one of the except branches above set msg
        # without returning: log and abort the module run.
        LOG.error(msg)
        self.module.fail_json(msg=msg)

    def get_cifs_server_instance(self, cifs_server_id):
        """Get CIFS server instance.
        :param: cifs_server_id: The ID of the CIFS server
        :return: Return CIFS server instance if exists
        """

        try:
            cifs_server_obj = utils.UnityCifsServer.get(cli=self.unity_conn._cli, _id=cifs_server_id)
            return cifs_server_obj

        except Exception as e:
            error_msg = "Failed to get the CIFS server %s instance" \
                        " with error %s" % (cifs_server_id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def delete_cifs_server(self, cifs_server_id, skip_unjoin=None, domain_username=None, domain_password=None):
        """Delete CIFS server.
        :param: cifs_server_id: The ID of the CIFS server
        :param: skip_unjoin: Flag indicating whether to unjoin SMB server account from AD before deletion
        :param: domain_username: The domain username
        :param: domain_password: The domain password
        :return: Return True if CIFS server is deleted
        """

        LOG.info("Deleting CIFS server")
        try:
            # In check mode nothing is deleted, but True is still returned
            # so the module reports changed=True for the would-be deletion.
            if not self.module.check_mode:
                cifs_obj = self.get_cifs_server_instance(cifs_server_id=cifs_server_id)
                cifs_obj.delete(skip_domain_unjoin=skip_unjoin, username=domain_username, password=domain_password)
            return True

        except Exception as e:
            msg = "Failed to delete CIFS server: %s with error: %s" % (cifs_server_id, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_nas_server_id(self, nas_server_name):
        """Get NAS server ID.
        :param: nas_server_name: The name of NAS server
        :return: Return NAS server ID if exists
        """

        LOG.info("Getting NAS server ID")
        try:
            obj_nas = self.unity_conn.get_nas_server(name=nas_server_name)
            return obj_nas.get_id()

        except Exception as e:
            msg = "Failed to get details of NAS server: %s with error: %s" % (nas_server_name, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def is_modify_interfaces(self, cifs_server_details):
        """Check if modification is required in existing interfaces
        :param: cifs_server_details: CIFS server details
        :return: Flag indicating if modification is required
        """

        # Collect the ids of interfaces currently attached to the server,
        # then report a change if any requested interface is missing.
        # NOTE(review): extra existing interfaces not listed in the request
        # do not count as a modification here.
        existing_interfaces = []
        if cifs_server_details['file_interfaces']['UnityFileInterfaceList']:
            for interface in cifs_server_details['file_interfaces']['UnityFileInterfaceList']:
                existing_interfaces.append(interface['UnityFileInterface']['id'])

        for interface in self.module.params['interfaces']:
            if interface not in existing_interfaces:
                return True
        return False

    def is_modification_required(self, cifs_server_details):
        """Check if modification is required in existing CIFS server
        :param: cifs_server_details: CIFS server details
        :return: Flag indicating if modification is required
        """

        LOG.info("Checking if any modification is required")
        # netbios_name/workgroup are compared upper-cased, presumably because
        # Unity stores them upper-cased — comparison is case-insensitive.
        param_list = ['netbios_name', 'workgroup']
        for param in param_list:
            if self.module.params[param] is not None and cifs_server_details[param] is not None and \
                    self.module.params[param].upper() != cifs_server_details[param]:
                return True

        # Check for domain
        if self.module.params['domain'] is not None and cifs_server_details['domain'] is not None and \
                self.module.params['domain'] != cifs_server_details['domain']:
            return True

        # Check file interfaces
        if self.module.params['interfaces'] is not None:
            return self.is_modify_interfaces(cifs_server_details)
        return False

    def create_cifs_server(self, nas_server_id, interfaces=None, netbios_name=None, cifs_server_name=None, domain=None,
                           domain_username=None, domain_password=None, workgroup=None, local_password=None):
        """Create CIFS server.
        :param: nas_server_id: The ID of NAS server
        :param: interfaces: List of file interfaces
        :param: netbios_name: Name of the SMB server in windows network
        :param: cifs_server_name: Name of the CIFS server
        :param: domain: The domain name where the SMB server is registered in Active Directory
        :param: domain_username: The domain username
        :param: domain_password: The domain password
        :param: workgroup: Standalone SMB server workgroup
        :param: local_password: Standalone SMB server admin password
        :return: Return True if CIFS server is created
        """

        LOG.info("Creating CIFS server")
        try:
            # Like delete: check mode skips the API call but still reports
            # changed=True for the would-be creation.
            if not self.module.check_mode:
                utils.UnityCifsServer.create(cli=self.unity_conn._cli, nas_server=nas_server_id, interfaces=interfaces,
                                             netbios_name=netbios_name, name=cifs_server_name, domain=domain,
                                             domain_username=domain_username, domain_password=domain_password,
                                             workgroup=workgroup, local_password=local_password)
            return True
        except Exception as e:
            msg = "Failed to create CIFS server with error: %s" % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def validate_params(self):
        """Validate the parameters.

        Fails the module if any string parameter was supplied as an empty or
        whitespace-only value.
        """

        param_list = ['nas_server_id', 'nas_server_name', 'domain', 'cifs_server_id', 'cifs_server_name',
                      'local_password', 'netbios_name', 'workgroup', 'domain_username', 'domain_password']

        msg = "Please provide valid {0}"
        for param in param_list:
            if self.module.params[param] is not None and len(self.module.params[param].strip()) == 0:
                errmsg = msg.format(param)
                self.module.fail_json(msg=errmsg)

    def perform_module_operation(self):
        """
        Perform different actions on CIFS server module based on parameters
        passed in the playbook
        """
        cifs_server_id = self.module.params['cifs_server_id']
        cifs_server_name = self.module.params['cifs_server_name']
        nas_server_id = self.module.params['nas_server_id']
        nas_server_name = self.module.params['nas_server_name']
        netbios_name = self.module.params['netbios_name']
        workgroup = self.module.params['workgroup']
        local_password = self.module.params['local_password']
        domain = self.module.params['domain']
        domain_username = self.module.params['domain_username']
        domain_password = self.module.params['domain_password']
        interfaces = self.module.params['interfaces']
        unjoin_cifs_server_account = self.module.params['unjoin_cifs_server_account']
        state = self.module.params['state']

        # result is a dictionary that contains changed status and CIFS server details
        result = dict(
            changed=False,
            cifs_server_details={}
        )

        # Validate the parameters
        self.validate_params()

        # Resolve NAS server name to its id so all later calls use the id.
        if nas_server_name is not None:
            nas_server_id = self.get_nas_server_id(nas_server_name)

        cifs_server_details = self.get_details(cifs_server_id=cifs_server_id,
                                               cifs_server_name=cifs_server_name,
                                               netbios_name=netbios_name,
                                               nas_server_id=nas_server_id)

        # Check if modification is required
        if cifs_server_details:
            if cifs_server_id is None:
                cifs_server_id = cifs_server_details['id']
            modify_flag = self.is_modification_required(cifs_server_details)
            if modify_flag:
                # Modification is deliberately unsupported: any drift between
                # playbook and array is a hard failure, not an update.
                self.module.fail_json(msg="Modification is not supported through Ansible module")

        if not cifs_server_details and state == 'present':
            if not nas_server_id:
                self.module.fail_json(msg="Please provide nas server id/name to create CIFS server.")

            # Standalone creation needs all three of netbios_name, workgroup
            # and local_password: either all are given or none.
            if any([netbios_name, workgroup, local_password]) and not all([netbios_name, workgroup, local_password]):
                msg = "netbios_name, workgroup and local_password " \
                      "are required to create standalone CIFS server."
                LOG.error(msg)
                self.module.fail_json(msg=msg)

            result['changed'] = self.create_cifs_server(nas_server_id, interfaces, netbios_name,
                                                        cifs_server_name, domain, domain_username, domain_password,
                                                        workgroup, local_password)

        if state == 'absent' and cifs_server_details:
            # The user flag expresses "unjoin before delete"; the SDK flag is
            # the inverse ("skip the unjoin"), hence the negation.
            skip_unjoin = None
            if unjoin_cifs_server_account is not None:
                skip_unjoin = not unjoin_cifs_server_account
            result['changed'] = self.delete_cifs_server(cifs_server_id, skip_unjoin, domain_username,
                                                        domain_password)

        # Refresh details after a possible create so the reported facts are
        # current (empty dict/None after a delete).
        if state == 'present':
            result['cifs_server_details'] = self.get_details(cifs_server_id=cifs_server_id,
                                                             cifs_server_name=cifs_server_name,
                                                             netbios_name=netbios_name,
                                                             nas_server_id=nas_server_id)
            LOG.info("Process Dict: %s", result['cifs_server_details'])
        self.module.exit_json(**result)
+
+
def get_id_name(cifs_server_id=None, cifs_server_name=None, netbios_name=None, nas_server_id=None):
    """Pick the identifier used in log and error messages.

    Precedence order: CIFS server ID, CIFS server name, NetBIOS name,
    NAS server ID (the last is returned as-is, even when falsy).

    :param: cifs_server_id: The ID of CIFS server
    :param: cifs_server_name: The name of CIFS server
    :param: netbios_name: Name of the SMB server in windows network
    :param: nas_server_id: The ID of NAS server
    :return: Return id_or_name
    """
    # An or-chain returns the first truthy value, or the last operand when
    # none are truthy — exactly the original if/elif/else cascade.
    return cifs_server_id or cifs_server_name or netbios_name or nas_server_id
+
+
def process_response(cifs_server_details):
    """Convert a CIFS server resource object into a plain properties dict.

    :param: cifs_server_details: CIFS server resource object from the SDK
    :return: Properties dict when the resource exists, otherwise None
    """
    # Guard clause: a non-existent resource yields None (the original
    # achieved the same via an implicit fall-through return).
    if not cifs_server_details.existed:
        return None
    return cifs_server_details._get_properties()
+
+
def process_dict(cifs_server_details):
    """Flatten CIFS server properties returned from a list query.

    List-based SDK queries wrap every property value in a one-element list;
    unwrap each known property to its first element.

    :param: cifs_server_details: Dict containing CIFS server details
    :return: Processed dict containing CIFS server details
    """
    # Bug fix: the original list was missing a comma after 'nas_server',
    # which concatenated it with the next literal into the bogus key
    # 'nas_servernetbios_name' — so 'nas_server' was never unwrapped
    # ('netbios_name' only worked because it was duplicated at the end).
    param_list = ['description', 'domain', 'file_interfaces', 'health', 'id',
                  'is_standalone', 'name', 'nas_server', 'netbios_name',
                  'smb_multi_channel_supported', 'smb_protocol_versions',
                  'smbca_supported', 'workgroup']

    for param in param_list:
        if param in cifs_server_details:
            cifs_server_details[param] = cifs_server_details[param][0]
    return cifs_server_details
+
+
def get_cifs_server_parameters():
    """Build the Ansible argument spec for the Unity CIFS server module.

    Passwords are marked no_log so they never appear in module output.
    """
    argument_spec = {
        'cifs_server_id': dict(),
        'cifs_server_name': dict(),
        'netbios_name': dict(),
        'workgroup': dict(),
        'local_password': dict(no_log=True),
        'domain': dict(),
        'domain_username': dict(),
        'domain_password': dict(no_log=True),
        'nas_server_name': dict(),
        'nas_server_id': dict(),
        'interfaces': dict(type='list', elements='str'),
        'unjoin_cifs_server_account': dict(type='bool'),
        'state': dict(required=True, type='str', choices=['present', 'absent']),
    }
    return argument_spec
+
+
def main():
    """Module entry point.

    Instantiate the Unity CIFS server handler and run the operation
    requested by the playbook parameters.
    """
    CIFSServer().perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py b/ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py
new file mode 100644
index 000000000..14e4de506
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py
@@ -0,0 +1,1516 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing consistency group on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: consistencygroup
+version_added: '1.1.0'
+short_description: Manage consistency groups on Unity storage system
+description:
+- Managing the consistency group on the Unity storage system includes
+ creating new consistency group, adding volumes to consistency
+ group, removing volumes from consistency group, mapping hosts to
+ consistency group, unmapping hosts from consistency group,
+ renaming consistency group, modifying attributes of consistency group,
+ enabling replication in consistency group, disabling replication in
+ consistency group and deleting consistency group.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+
+options:
+ cg_name:
+ description:
+ - The name of the consistency group.
+ - It is mandatory for the create operation.
+ - Specify either I(cg_name) or I(cg_id) (but not both) for any operation.
+ type: str
+ cg_id:
+ description:
+ - The ID of the consistency group.
+ - It can be used only for get, modify, add/remove volumes, or delete
+ operations.
+ type: str
+ volumes:
+ description:
+ - This is a list of volumes.
+ - Either the volume ID or name must be provided for adding/removing
+ existing volumes from consistency group.
+ - If I(volumes) are given, then I(vol_state) should also be specified.
+ - Volumes cannot be added/removed from consistency group, if the
+ consistency group or the volume has snapshots.
+ type: list
+ elements: dict
+ suboptions:
+ vol_id:
+ description:
+ - The ID of the volume.
+ type: str
+ vol_name:
+ description:
+ - The name of the volume.
+ type: str
+ vol_state:
+ description:
+ - String variable, describes the state of volumes inside consistency
+ group.
+ - If I(volumes) are given, then I(vol_state) should also be specified.
+ choices: [present-in-group , absent-in-group]
+ type: str
+ new_cg_name:
+ description:
+ - The new name of the consistency group, used in rename operation.
+ type: str
+ description:
+ description:
+ - Description of the consistency group.
+ type: str
+ snap_schedule:
+ description:
+ - Snapshot schedule assigned to the consistency group.
+ - Specifying an empty string "" removes the existing snapshot schedule
+ from consistency group.
+ type: str
+ tiering_policy:
+ description:
+ - Tiering policy choices for how the storage resource data will be
+ distributed among the tiers available in the pool.
+ choices: ['AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']
+ type: str
+ hosts:
+ description:
+ - This is a list of hosts.
+ - Either the host ID or name must be provided for mapping/unmapping
+ hosts for a consistency group.
+ - If I(hosts) are given, then I(mapping_state) should also be specified.
+ - Hosts cannot be mapped to a consistency group, if the
+ consistency group has no volumes.
+ - When a consistency group is being mapped to the host,
+ users should not use the volume module to map the volumes
+ in the consistency group to hosts.
+ type: list
+ elements: dict
+ suboptions:
+ host_id:
+ description:
+ - The ID of the host.
+ type: str
+ host_name:
+ description:
+ - The name of the host.
+ type: str
+ mapping_state:
+ description:
+ - String variable, describes the state of hosts inside the consistency
+ group.
+ - If I(hosts) are given, then I(mapping_state) should also be specified.
+ choices: [mapped , unmapped]
+ type: str
+ replication_params:
+ description:
+ - Settings required for enabling replication.
+ type: dict
+ suboptions:
+ destination_cg_name:
+ description:
+ - Name of the destination consistency group.
+ - Default value will be source consistency group name prefixed by 'DR_'.
+ type: str
+ replication_mode:
+ description:
+ - The replication mode.
+ type: str
+ required: true
+ choices: ['asynchronous', 'manual']
+ rpo:
+ description:
+ - Maximum time to wait before the system syncs the source and destination LUNs.
+ - Option I(rpo) should be specified if the I(replication_mode) is C(asynchronous).
+ - The value should be in range of C(5) to C(1440).
+ type: int
+ replication_type:
+ description:
+ - Type of replication.
+ choices: ['local', 'remote']
+ default: local
+ type: str
+ remote_system:
+ description:
+ - Details of remote system to which the replication is being configured.
+ - The I(remote_system) option should be specified if the I(replication_type) is C(remote).
+ type: dict
+ suboptions:
+ remote_system_host:
+ required: true
+ description:
+ - IP or FQDN for remote Unity unisphere Host.
+ type: str
+ remote_system_username:
+ type: str
+ required: true
+ description:
+ - User name of remote Unity unisphere Host.
+ remote_system_password:
+ type: str
+ required: true
+ description:
+ - Password of remote Unity unisphere Host.
+ remote_system_verifycert:
+ type: bool
+ default: true
+ description:
+ - Boolean variable to specify whether or not to validate SSL
+ certificate of remote Unity unisphere Host.
+ - C(true) - Indicates that the SSL certificate should be verified.
+ - C(false) - Indicates that the SSL certificate should not be
+ verified.
+ remote_system_port:
+ description:
+ - Port at which remote Unity unisphere is hosted.
+ type: int
+ default: 443
+ destination_pool_name:
+ description:
+ - Name of pool to allocate destination Luns.
+ - Mutually exclusive with I(destination_pool_id).
+ type: str
+ destination_pool_id:
+ description:
+ - Id of pool to allocate destination Luns.
+ - Mutually exclusive with I(destination_pool_name).
+ type: str
+ replication_state:
+ description:
+ - State of the replication.
+ choices: ['enable', 'disable']
+ type: str
+ state:
+ description:
+ - Define whether the consistency group should exist or not.
+ choices: [absent, present]
+ required: true
+ type: str
+notes:
+ - The I(check_mode) is not supported.
+"""
+
+EXAMPLES = r"""
+- name: Create consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ cg_name: "{{cg_name}}"
+ description: "{{description}}"
+ snap_schedule: "{{snap_schedule1}}"
+ state: "present"
+
+- name: Get details of consistency group using id
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ state: "present"
+
+- name: Add volumes to consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ volumes:
+ - vol_name: "Ansible_Test-3"
+ - vol_id: "sv_1744"
+ vol_state: "{{vol_state_present}}"
+ state: "present"
+
+- name: Rename consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{cg_name}}"
+ new_cg_name: "{{new_cg_name}}"
+ state: "present"
+
+- name: Modify consistency group details
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{new_cg_name}}"
+ snap_schedule: "{{snap_schedule2}}"
+ tiering_policy: "{{tiering_policy1}}"
+ state: "present"
+
+- name: Map hosts to a consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ hosts:
+ - host_name: "10.226.198.248"
+ - host_id: "Host_511"
+ mapping_state: "mapped"
+ state: "present"
+
+- name: Unmap hosts from a consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ hosts:
+ - host_id: "Host_511"
+ - host_name: "10.226.198.248"
+ mapping_state: "unmapped"
+ state: "present"
+
+- name: Remove volumes from consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{new_cg_name}}"
+ volumes:
+ - vol_name: "Ansible_Test-3"
+ - vol_id: "sv_1744"
+ vol_state: "{{vol_state_absent}}"
+ state: "present"
+
+- name: Delete consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{new_cg_name}}"
+ state: "absent"
+
+- name: Enable replication for consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "cg_id_1"
+ replication_params:
+ destination_cg_name: "destination_cg_1"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ remote_system:
+ remote_system_host: '10.1.2.3'
+ remote_system_verifycert: False
+ remote_system_username: 'username'
+ remote_system_password: 'password'
+ destination_pool_name: "pool_test_1"
+ replication_state: "enable"
+ state: "present"
+
+- name: Disable replication for consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "dis_repl_ans_source"
+ replication_state: "disable"
+ state: "present"
+"""
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+
+consistency_group_details:
+ description: Details of the consistency group.
+ returned: When consistency group exists
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the consistency group.
+ type: str
+ relocation_policy:
+ description: FAST VP tiering policy for the consistency group.
+ type: str
+ cg_replication_enabled:
+ description: Whether or not the replication is enabled.
+ type: bool
+ snap_schedule:
+ description: Snapshot schedule applied to consistency group.
+ type: dict
+ contains:
+ UnitySnapSchedule:
+ description: Snapshot schedule applied to consistency
+ group.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the
+ snapshot schedule.
+ type: str
+ name:
+ description: The name of the snapshot schedule.
+ type: str
+ luns:
+ description: Details of volumes part of consistency group.
+ type: dict
+ contains:
+ UnityLunList:
+ description: List of volumes part of consistency group.
+ type: list
+ contains:
+ UnityLun:
+ description: Detail of volume.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to volume.
+ type: str
+ name:
+ description: The name of the volume.
+ type: str
+ snapshots:
+ description: List of snapshots of consistency group.
+ type: list
+ contains:
+ name:
+ description: Name of the snapshot.
+ type: str
+ creation_time:
+ description: Date and time on which the snapshot was taken.
+ type: str
+ expirationTime:
+ description: Date and time after which the snapshot will expire.
+ type: str
+ storageResource:
+ description: Storage resource for which the snapshot was
+ taken.
+ type: dict
+ contains:
+ UnityStorageResource:
+ description: Details of the storage resource.
+ type: dict
+ contains:
+ id:
+ description: The id of the storage
+ resource.
+ type: str
+ block_host_access:
+ description: Details of hosts mapped to the consistency group.
+ type: dict
+ contains:
+ UnityBlockHostAccessList:
+ description: List of hosts mapped to consistency group.
+ type: list
+ contains:
+ UnityBlockHostAccess:
+ description: Details of host.
+ type: dict
+ contains:
+ id:
+ description: The ID of the host.
+ type: str
+ name:
+ description: The name of the host.
+ type: str
+ sample: {
+ "advanced_dedup_status": "DedupStatusEnum.DISABLED",
+ "block_host_access": null,
+ "cg_replication_enabled": false,
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "data_reduction_status": "DataReductionStatusEnum.DISABLED",
+ "datastores": null,
+ "dedup_status": null,
+ "description": "Ansible testing",
+ "esx_filesystem_block_size": null,
+ "esx_filesystem_major_version": null,
+ "existed": true,
+ "filesystem": null,
+ "hash": 8776023812033,
+ "health": {
+ "UnityHealth": {
+ "hash": 8776023811889
+ }
+ },
+ "host_v_vol_datastore": null,
+ "id": "res_7477",
+ "is_replication_destination": false,
+ "is_snap_schedule_paused": null,
+ "luns": null,
+ "metadata_size": 0,
+ "metadata_size_allocated": 0,
+ "name": "Ansible_CG_Testing",
+ "per_tier_size_used": null,
+ "pools": null,
+ "relocation_policy": "TieringPolicyEnum.MIXED",
+ "replication_type": "ReplicationTypeEnum.NONE",
+ "size_allocated": 0,
+ "size_total": 0,
+ "size_used": null,
+ "snap_count": 0,
+ "snap_schedule": null,
+ "snaps_size_allocated": 0,
+ "snaps_size_total": 0,
+ "snapshots": [],
+ "thin_status": "ThinStatusEnum.FALSE",
+ "type": "StorageResourceTypeEnum.CONSISTENCY_GROUP",
+ "virtual_volumes": null,
+ "vmware_uuid": null
+ }
+'''
+
+import logging
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+# Module-level logger for this module. NOTE(review): 'log_devel' is the
+# collection utility's keyword for the log level (likely a misspelling of
+# 'log_level' kept for compatibility) — confirm against utils.get_logger.
+LOG = utils.get_logger('consistencygroup',
+ log_devel=logging.INFO)
+
+# Application identifier sent to Unisphere with every REST request.
+application_type = "Ansible/1.6.0"
+
+
+class ConsistencyGroup(object):
+ """Class with consistency group operations"""
+
def __init__(self):
    """Build the argument spec and open the Unisphere connection."""
    self.module_params = utils.get_unity_management_host_parameters()
    self.module_params.update(get_consistencygroup_parameters())

    # Initialize the Ansible module; cg_name/cg_id are exclusive but one
    # of them is required, and volumes/hosts each require their state flag.
    self.module = AnsibleModule(
        argument_spec=self.module_params,
        supports_check_mode=False,
        mutually_exclusive=[['cg_name', 'cg_id']],
        required_one_of=[['cg_name', 'cg_id']],
        required_together=[['volumes', 'vol_state'],
                           ['hosts', 'mapping_state']]
    )
    utils.ensure_required_libs(self.module)

    # Connection to the Unisphere REST API, tagged with application_type.
    self.unity_conn = utils.get_unity_unisphere_connection(
        self.module.params, application_type)
+
def return_cg_instance(self, cg_name):
    """Fetch the consistency group object by name.

    :param cg_name: The name of the consistency group
    :return: Instance of the consistency group
    """
    try:
        # Resolve the name to an id first, then fetch the full object.
        found = self.unity_conn.get_cg(name=cg_name)
        return utils.cg.UnityConsistencyGroup.get(self.unity_conn._cli,
                                                  found.get_id())
    except Exception as err:
        message = "Failed to get the consistency group {0} instance with " \
                  "error {1}".format(cg_name, str(err))
        LOG.error(message)
        self.module.fail_json(msg=message)
+
+ def get_details(self, cg_id=None, cg_name=None):
+ """Get consistency group details.
+ :param cg_id: The id of the consistency group
+ :param cg_name: The name of the consistency group
+ :return: Dict containing consistency group details if exists
+ """
+
+ id_or_name = cg_id if cg_id else cg_name
+ errormsg = "Failed to get details of consistency group {0} with" \
+ " error {1}"
+
+ try:
+ cg_details = self.unity_conn.get_cg(_id=cg_id, name=cg_name)
+ # When looked up by id, recover the name so the instance fetch
+ # below can use it.
+ if cg_name is None:
+ cg_name = cg_details.name
+
+ if cg_details.existed:
+ cg_obj = self.return_cg_instance(cg_name)
+ snapshots = cg_obj.snapshots
+
+ snapshot_list = [snap._get_properties() for snap in snapshots]
+
+ cg_ret_details = cg_details._get_properties()
+
+ # Append details of host mapped to the consistency group
+ # in return response
+ if cg_ret_details['block_host_access']:
+ for i in range(len(cg_details.block_host_access)):
+ cg_ret_details['block_host_access']['UnityBlockHostAccessList'][i]['UnityBlockHostAccess'][
+ 'id'] = cg_details.block_host_access[i].host.id
+ cg_ret_details['block_host_access']['UnityBlockHostAccessList'][i]['UnityBlockHostAccess'][
+ 'name'] = cg_details.block_host_access[i].host.name
+ cg_ret_details['snapshots'] = snapshot_list
+
+ # Add volume name to the dict
+ if cg_ret_details['luns'] is not None:
+ for i in range(len(cg_details.luns)):
+ cg_ret_details['luns']['UnityLunList'][i]['UnityLun'][
+ 'name'] = cg_details.luns[i].name
+
+ # Add snapshot schedule name to the dict
+ if cg_ret_details['snap_schedule'] is not None:
+ cg_ret_details['snap_schedule']['UnitySnapSchedule'][
+ 'name'] = cg_details.snap_schedule.name
+
+ # Status of cg replication
+ cg_ret_details['cg_replication_enabled'] = True if cg_details.check_cg_is_replicated() else False
+
+ return cg_ret_details
+ else:
+ LOG.info("Failed to get details of consistency group %s",
+ id_or_name)
+ return None
+
+ except utils.HttpError as e:
+ # 401 is reported as a credentials problem; any other HTTP error
+ # is surfaced verbatim. Both abort the module run.
+ if e.http_status == 401:
+ auth_err = "Incorrect username or password, {0}".format(
+ e.message)
+ msg = errormsg.format(id_or_name, auth_err)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ else:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except utils.UnityResourceNotFoundError as e:
+ # Not-found is a valid "does not exist" answer, not a failure.
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ return None
+
+ except Exception as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
def get_host_id_by_name(self, host_name):
    """Resolve a host name to its Unity host ID.

    :param host_name: str
    :return: unity host ID
    :rtype: str
    """
    try:
        host = self.unity_conn.get_host(name=host_name)
        # A lookup can return an object that never existed on the array;
        # treat that the same as "not found".
        if not (host and host.existed):
            msg = "Host name: %s does not exists" % host_name
            LOG.error(msg)
            self.module.fail_json(msg=msg)
        return host.id
    except Exception as err:
        msg = "Failed to get host ID by name: %s error: %s" % (
            host_name, str(err))
        LOG.error(msg)
        self.module.fail_json(msg=msg)
+
+ def get_volume_details(self, vol_name=None, vol_id=None):
+ """Get the details of a volume.
+ :param vol_name: The name of the volume
+ :param vol_id: The id of the volume
+ :return: Dict containing volume details if exists
+ """
+
+ id_or_name = vol_id if vol_id else vol_name
+
+ try:
+ lun = self.unity_conn.get_lun(name=vol_name, _id=vol_id)
+
+ # cg stays None unless the LUN already belongs to some CG.
+ cg = None
+ if lun.existed:
+ lunid = lun.get_id()
+ unitylun = utils.UnityLun.get(self.unity_conn._cli, lunid)
+ if unitylun.cg is not None:
+ cg = unitylun.cg
+ else:
+ errormsg = "The volume {0} not found.".format(id_or_name)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ cg_details = self.get_details(
+ cg_id=self.module.params['cg_id'],
+ cg_name=self.module.params['cg_name'])
+
+ # Check if volume is already part of another consistency group
+ if cg is None:
+ return lun._get_properties()['id']
+
+ errormsg = "The volume {0} is already part of consistency group" \
+ " {1}".format(id_or_name, cg.name)
+
+ # Fail when the target CG does not exist yet, or when the LUN's
+ # current CG is a different one than the target.
+ if cg_details is None:
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ if cg.id != cg_details['id']:
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ # The LUN is either free or already in the target CG: return its id.
+ return lun._get_properties()['id']
+
+ except Exception as e:
+ msg = "Failed to get the volume {0} with error {1}".format(
+ id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def remove_volumes_from_cg(self, cg_name, volumes):
+ """Remove volumes from consistency group.
+ :param cg_name: The name of the consistency group
+ :param volumes: The list of volumes to be removed
+ :return: Boolean value to indicate if volumes are removed from
+ consistency group
+ """
+
+ cg_details = self.unity_conn.get_cg(name=cg_name)._get_properties()
+ existing_volumes_in_cg = cg_details['luns']
+ existing_vol_ids = []
+
+ if existing_volumes_in_cg:
+ existing_vol_ids = [vol['UnityLun']['id'] for vol in
+ existing_volumes_in_cg['UnityLunList']]
+
+ ids_to_remove = []
+ vol_name_list = []
+ vol_id_list = []
+
+ # De-duplicate the request into separate id and name lists.
+ for vol in volumes:
+ if 'vol_id' in vol and not (vol['vol_id'] in vol_id_list):
+ vol_id_list.append(vol['vol_id'])
+ elif 'vol_name' in vol and not (vol['vol_name'] in vol_name_list):
+ vol_name_list.append(vol['vol_name'])
+
+ """remove volume by name"""
+ for vol in vol_name_list:
+ ids_to_remove.append(self.get_volume_details(vol_name=vol))
+
+ # Only ids that are actually present in the CG are removed, which
+ # makes the operation idempotent.
+ vol_id_list = list(set(vol_id_list + ids_to_remove))
+ ids_to_remove = list(set(existing_vol_ids).intersection(set(vol_id_list)))
+
+ LOG.info("Volume IDs to remove %s", ids_to_remove)
+
+ if len(ids_to_remove) == 0:
+ return False
+
+ # The SDK expects a list of {"id": <lun-id>} dicts.
+ vol_remove_list = []
+ for vol in ids_to_remove:
+ vol_dict = {"id": vol}
+ vol_remove_list.append(vol_dict)
+
+ cg_obj = self.return_cg_instance(cg_name)
+
+ try:
+ cg_obj.modify(lun_remove=vol_remove_list)
+ return True
+ except Exception as e:
+ errormsg = "Remove existing volumes from consistency group {0} " \
+ "failed with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def add_volumes_to_cg(self, cg_name, volumes, tiering_policy):
+ """Add volumes to consistency group.
+ :param cg_name: The name of the consistency group
+ :param volumes: The list of volumes to be added to consistency
+ group
+ :param tiering_policy: The tiering policy that is to be applied to
+ consistency group
+ :return: The boolean value to indicate if volumes are added to
+ consistency group
+ """
+
+ cg_details = self.unity_conn.get_cg(name=cg_name)._get_properties()
+ existing_volumes_in_cg = cg_details['luns']
+ existing_vol_ids = []
+
+ if existing_volumes_in_cg:
+ existing_vol_ids = [vol['UnityLun']['id'] for vol in
+ existing_volumes_in_cg['UnityLunList']]
+
+ ids_to_add = []
+ vol_name_list = []
+ vol_id_list = []
+ all_vol_ids = []
+
+ # De-duplicate the request into separate id and name lists.
+ for vol in volumes:
+ if 'vol_id' in vol and not (vol['vol_id'] in vol_id_list):
+ vol_id_list.append(vol['vol_id'])
+ elif 'vol_name' in vol and not (vol['vol_name'] in vol_name_list):
+ vol_name_list.append(vol['vol_name'])
+
+ """add volume by name"""
+ for vol in vol_name_list:
+ ids_to_add.append(self.get_volume_details(vol_name=vol))
+
+ """add volume by id"""
+ for vol in vol_id_list:
+ """verifying if volume id exists in array"""
+ ids_to_add.append(self.get_volume_details(vol_id=vol))
+
+ # Only ids not already in the CG are added (idempotent).
+ all_vol_ids = ids_to_add + existing_vol_ids
+ ids_to_add = list(set(all_vol_ids) - set(existing_vol_ids))
+
+ LOG.info("Volume IDs to add %s", ids_to_add)
+
+ if len(ids_to_add) == 0:
+ return False
+
+ # The SDK expects a list of {"id": <lun-id>} dicts.
+ vol_add_list = []
+ for vol in ids_to_add:
+ vol_dict = {"id": vol}
+ vol_add_list.append(vol_dict)
+
+ cg_obj = self.return_cg_instance(cg_name)
+
+ # The tiering policy can only be applied together with the volume
+ # add, since it cannot be set on an empty CG.
+ policy_enum = None
+ if tiering_policy:
+ if utils.TieringPolicyEnum[tiering_policy]:
+ policy_enum = utils.TieringPolicyEnum[tiering_policy]
+ else:
+ errormsg = "Invalid choice {0} for tiering policy".format(
+ tiering_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ try:
+ cg_obj.modify(lun_add=vol_add_list, tiering_policy=policy_enum)
+ return True
+ except Exception as e:
+ errormsg = "Add existing volumes to consistency group {0} " \
+ "failed with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def map_hosts_to_cg(self, cg_name, add_hosts):
+ """Map hosts to consistency group.
+ :param cg_name: The name of the consistency group
+ :param add_hosts: List of hosts that are to be mapped to cg
+ :return: Boolean value to indicate if hosts were mapped to cg
+ """
+ cg_details = self.unity_conn.get_cg(name=cg_name)
+ existing_volumes_in_cg = cg_details.luns
+
+ existing_hosts_in_cg = cg_details.block_host_access
+ existing_host_ids = []
+
+ """Get list of existing hosts in consistency group"""
+ if existing_hosts_in_cg:
+ for i in range(len(existing_hosts_in_cg)):
+ existing_host_ids.append(existing_hosts_in_cg[i].host.id)
+
+ host_id_list = []
+ host_name_list = []
+ add_hosts_id = []
+ host_add_list = []
+ all_hosts = []
+
+ # De-duplicate the request into separate id and name lists.
+ for host in add_hosts:
+ if 'host_id' in host and not (host['host_id'] in host_id_list):
+ host_id_list.append(host['host_id'])
+ elif 'host_name' in host and not (host['host_name'] in host_name_list):
+ host_name_list.append(host['host_name'])
+
+ """add hosts by name"""
+ for host_name in host_name_list:
+ add_hosts_id.append(self.get_host_id_by_name(host_name))
+
+ # Only hosts not already mapped remain in add_hosts_id (idempotent).
+ all_hosts = host_id_list + existing_host_ids + add_hosts_id
+ add_hosts_id = list(set(all_hosts) - set(existing_host_ids))
+
+ if len(add_hosts_id) == 0:
+ return False
+
+ # NOTE(review): hosts are mapped only when the CG already contains
+ # LUNs; with new hosts but no LUNs this method implicitly returns
+ # None (falsy), so the caller reports no change.
+ if existing_volumes_in_cg:
+
+ for host_id in add_hosts_id:
+ host_dict = {"id": host_id}
+ host_add_list.append(host_dict)
+
+ LOG.info("List of hosts to be added to consistency group "
+ "%s ", host_add_list)
+ cg_obj = self.return_cg_instance(cg_name)
+ try:
+ cg_obj.modify(name=cg_name, host_add=host_add_list)
+ return True
+ except Exception as e:
+ errormsg = "Adding host to consistency group {0} " \
+ "failed with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def unmap_hosts_to_cg(self, cg_name, remove_hosts):
+ """Unmap hosts to consistency group.
+ :param cg_name: The name of the consistency group
+ :param remove_hosts: List of hosts that are to be unmapped from cg
+ :return: Boolean value to indicate if hosts were mapped to cg
+ """
+ cg_details = self.unity_conn.get_cg(name=cg_name)
+ existing_hosts_in_cg = cg_details.block_host_access
+ existing_host_ids = []
+
+ """Get host ids existing in consistency group"""
+ if existing_hosts_in_cg:
+ for i in range(len(existing_hosts_in_cg)):
+ existing_host_ids.append(existing_hosts_in_cg[i].host.id)
+
+ host_remove_list = []
+ host_id_list = []
+ host_name_list = []
+ remove_hosts_id = []
+
+ # De-duplicate the request into separate id and name lists.
+ for host in remove_hosts:
+ if 'host_id' in host and not (host['host_id'] in host_id_list):
+ host_id_list.append(host['host_id'])
+ elif 'host_name' in host and not (host['host_name'] in host_name_list):
+ host_name_list.append(host['host_name'])
+
+ """remove hosts by name"""
+ for host in host_name_list:
+ remove_hosts_id.append(self.get_host_id_by_name(host))
+
+ # Only hosts actually mapped to the CG are unmapped (idempotent).
+ host_id_list = list(set(host_id_list + remove_hosts_id))
+ remove_hosts_id = list(set(existing_host_ids).intersection(set(host_id_list)))
+
+ if len(remove_hosts_id) == 0:
+ return False
+
+ # The SDK expects a list of {"id": <host-id>} dicts.
+ for host in remove_hosts_id:
+ host_dict = {"id": host}
+ host_remove_list.append(host_dict)
+ cg_obj = self.return_cg_instance(cg_name)
+ try:
+ cg_obj.modify(name=cg_name, host_remove=host_remove_list)
+ return True
+ except Exception as e:
+ errormsg = "Removing host from consistency group {0} " \
+ "failed with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
def rename_cg(self, cg_name, new_cg_name):
    """Rename a consistency group.

    :param cg_name: The name of the consistency group
    :param new_cg_name: The new name of the consistency group
    :return: Boolean value to indicate if consistency group renamed
    """
    group = self.return_cg_instance(cg_name)
    try:
        group.modify(name=new_cg_name)
    except Exception as err:
        errormsg = "Rename operation of consistency group {0} failed " \
                   "with error {1}".format(cg_name, str(err))
        LOG.error(errormsg)
        self.module.fail_json(msg=errormsg)
    return True
+
+ def is_cg_modified(self, cg_details):
+ """Check if the desired consistency group state is different from
+ existing consistency group.
+ :param cg_details: The dict containing consistency group details
+ :return: Boolean value to indicate if modification is needed
+ """
+ modified = False
+
+ # A tiering policy cannot be set on a CG that has no LUNs and no
+ # volumes being added in this run.
+ if self.module.params['tiering_policy'] and cg_details['luns'] is \
+ None and self.module.params['volumes'] is None:
+ self.module.fail_json(msg="The system cannot assign a tiering"
+ " policy to an empty consistency group."
+ )
+
+ # Likewise, hosts cannot be mapped to an empty CG.
+ if self.module.params['hosts'] and cg_details['luns'] is \
+ None and self.module.params['volumes'] is None:
+ self.module.fail_json(msg="The system cannot assign hosts"
+ " to an empty consistency group.")
+
+ # Modified when the requested description differs from the current
+ # one (including setting it for the first time), OR when the
+ # requested snap schedule differs from the attached one (including
+ # attaching one where none exists).
+ if ((cg_details['description'] is not None and
+ self.module.params['description'] is not None and
+ cg_details['description'] != self.module.params['description'])
+ or (cg_details['description'] is None and
+ self.module.params['description'] is not None)) or \
+ ((cg_details['snap_schedule'] is not None and
+ self.module.params['snap_schedule'] is not None and
+ cg_details['snap_schedule']['UnitySnapSchedule']['name'] !=
+ self.module.params['snap_schedule']) or
+ (cg_details['snap_schedule'] is None and
+ self.module.params['snap_schedule'])):
+ modified = True
+
+ # relocation_policy looks like "TieringPolicyEnum.<NAME>"; compare
+ # the part after the dot with the requested tiering policy.
+ if cg_details['relocation_policy']:
+ tier_policy = cg_details['relocation_policy'].split('.')
+ if self.module.params['tiering_policy'] is not None and \
+ tier_policy[1] != self.module.params['tiering_policy']:
+ modified = True
+
+ return modified
+
def create_cg(self, cg_name, description, snap_schedule):
    """Create a consistency group.

    :param cg_name: The name of the consistency group
    :param description: The description of the consistency group
    :param snap_schedule: The name of the snapshot schedule
    :return: The boolean value to indicate if consistency group
             created and also returns the CG object
    """
    try:
        # The SDK expects the schedule as {"name": <schedule-name>}.
        schedule_arg = None if snap_schedule is None \
            else {"name": snap_schedule}

        new_cg = utils.cg.UnityConsistencyGroup.create(
            self.unity_conn._cli, name=cg_name, description=description,
            snap_schedule=schedule_arg)
        return True, new_cg
    except Exception as err:
        errormsg = "Create operation of consistency group {0} failed" \
                   " with error {1}".format(cg_name, str(err))
        LOG.error(errormsg)
        self.module.fail_json(msg=errormsg)
+
+ def modify_cg(self, cg_name, description, snap_schedule, tiering_policy):
+ """Modify consistency group.
+ :param cg_name: The name of the consistency group
+ :param description: The description of the consistency group
+ :param snap_schedule: The name of the snapshot schedule
+ :param tiering_policy: The tiering policy that is to be applied to
+ consistency group
+ :return: The boolean value to indicate if consistency group
+ modified
+ """
+ cg_obj = self.return_cg_instance(cg_name)
+ is_snap_schedule_paused = None
+
+ # An empty-string snap_schedule means "detach the schedule":
+ # resume the schedule flag and send {"name": None} to the SDK.
+ if self.module.params['snap_schedule'] == "":
+ is_snap_schedule_paused = False
+
+ if snap_schedule is not None:
+ if snap_schedule == "":
+ snap_schedule = {"name": None}
+ else:
+ snap_schedule = {"name": snap_schedule}
+
+ # Translate the policy name to the SDK enum; invalid names fail.
+ policy_enum = None
+ if tiering_policy:
+ if utils.TieringPolicyEnum[tiering_policy]:
+ policy_enum = utils.TieringPolicyEnum[tiering_policy]
+ else:
+ errormsg = "Invalid choice {0} for tiering policy".format(
+ tiering_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ try:
+ cg_obj.modify(description=description,
+ snap_schedule=snap_schedule,
+ tiering_policy=policy_enum,
+ is_snap_schedule_paused=is_snap_schedule_paused)
+ return True
+
+ except Exception as e:
+ errormsg = "Modify operation of consistency group {0} failed " \
+ "with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
def delete_cg(self, cg_name):
    """Delete a consistency group.

    :param cg_name: The name of the consistency group
    :return: The boolean value to indicate if consistency group deleted
    """
    group = self.return_cg_instance(cg_name)
    try:
        group.delete()
    except Exception as err:
        errormsg = "Delete operation of consistency group {0} failed " \
                   "with error {1}".format(cg_name, str(err))
        LOG.error(errormsg)
        self.module.fail_json(msg=errormsg)
    return True
+
def refine_volumes(self, volumes):
    """Normalize volume entries to a single identifier each.

    :param volumes: Volumes that is to be added/removed
    :return: List of volumes with each volume being identified with either
             vol_id or vol_name
    """
    for entry in volumes:
        has_id = entry['vol_id'] is not None
        has_name = entry['vol_name'] is not None
        # Drop the unused key only when exactly one identifier was given;
        # entries with both (or neither) are left for validation to flag.
        if has_id and not has_name:
            del entry['vol_name']
        elif has_name and not has_id:
            del entry['vol_id']
    return volumes
+
def refine_hosts(self, hosts):
    """Normalize host entries to a single identifier each.

    :param hosts: Hosts that is to be mapped/unmapped
    :return: List of hosts with each host being identified with either
             host_id or host_name
    """
    for entry in hosts:
        has_id = entry['host_id'] is not None
        has_name = entry['host_name'] is not None
        # Drop the unused key only when exactly one identifier was given;
        # entries with both (or neither) are left for validation to flag.
        if has_id and not has_name:
            del entry['host_name']
        elif has_name and not has_id:
            del entry['host_id']
    return hosts
+
def validate_volumes(self, volumes):
    """Validate that every volume entry identifies exactly one volume.

    :param volumes: List of volumes
    """
    for entry in volumes:
        has_id = 'vol_id' in entry
        has_name = 'vol_name' in entry

        if has_id and has_name:
            errormsg = "Both name and id are found for volume {0}. No" \
                       " action would be taken. Please specify either" \
                       " name or id.".format(entry)
        elif has_id and not entry['vol_id'].strip():
            errormsg = "vol_id is blank. Please specify valid vol_id."
        elif has_name and not entry.get('vol_name').strip():
            errormsg = "vol_name is blank. Please specify valid vol_name."
        elif has_name:
            # Existence check; fails the module if the volume is absent.
            self.get_volume_details(vol_name=entry['vol_name'])
            continue
        elif has_id:
            self.get_volume_details(vol_id=entry['vol_id'])
            continue
        else:
            errormsg = "Expected either vol_name or vol_id, found" \
                       " neither for volume {0}".format(entry)
        LOG.error(errormsg)
        self.module.fail_json(msg=errormsg)
+
def validate_hosts(self, hosts):
    """Validate that every host entry identifies exactly one host.

    :param hosts: List of hosts
    """
    for entry in hosts:
        has_id = 'host_id' in entry
        has_name = 'host_name' in entry

        if has_id and has_name:
            errormsg = "Both name and id are found for host {0}. No" \
                       " action would be taken. Please specify either" \
                       " name or id.".format(entry)
        elif has_id and not entry['host_id'].strip():
            errormsg = "host_id is blank. Please specify valid host_id."
        elif has_name and not entry.get('host_name').strip():
            errormsg = "host_name is blank. Please specify valid host_name."
        elif has_name:
            # Existence check; fails the module if the host is absent.
            self.get_host_id_by_name(host_name=entry['host_name'])
            continue
        elif has_id:
            host_obj = self.unity_conn.get_host(_id=entry['host_id'])
            if host_obj is None or host_obj.existed is False:
                msg = "Host id: %s does not exists" % entry['host_id']
                LOG.error(msg)
                self.module.fail_json(msg=msg)
            continue
        else:
            errormsg = "Expected either host_name or host_id, found" \
                       " neither for host {0}".format(entry)
        LOG.error(errormsg)
        self.module.fail_json(msg=errormsg)
+
+ def update_replication_params(self, replication):
+ ''' Update replication params '''
+
+ # For remote replication, log in to the remote Unisphere to resolve
+ # the remote system name and destination pool id; for local
+ # replication the pool is looked up on the current array.
+ if 'replication_type' in replication and replication['replication_type'] == 'remote':
+ connection_params = {
+ 'unispherehost': replication['remote_system']['remote_system_host'],
+ 'username': replication['remote_system']['remote_system_username'],
+ 'password': replication['remote_system']['remote_system_password'],
+ 'validate_certs': replication['remote_system']['remote_system_verifycert'],
+ # NOTE(review): assumes 'remote_system_port' is always present
+ # (presumably defaulted by the module argument spec) — confirm.
+ 'port': replication['remote_system']['remote_system_port']
+ }
+ remote_system_conn = utils.get_unity_unisphere_connection(
+ connection_params, application_type)
+ replication['remote_system_name'] = remote_system_conn.name
+ if replication['destination_pool_name'] is not None:
+ pool_object = remote_system_conn.get_pool(name=replication['destination_pool_name'])
+ replication['destination_pool_id'] = pool_object.id
+ else:
+ if replication['destination_pool_name'] is not None:
+ pool_object = self.unity_conn.get_pool(name=replication['destination_pool_name'])
+ replication['destination_pool_id'] = pool_object.id
+
def get_destination_cg_luns(self, source_lun_list):
    """Build destination LUN descriptors for CG replication.

    :param source_lun_list: LUNs of the source consistency group
    :return: List of UnityStorageResource objects describing the
             destination LUNs
    """
    destination_luns = []
    if source_lun_list is not None:
        for src in source_lun_list:
            dst = utils.UnityStorageResource()
            # Mirror the source LUN attributes; the destination name is
            # the source name with a "DR_" prefix.
            dst.name = "DR_" + src.name
            dst.is_thin_enabled = src.is_thin_enabled
            dst.size_total = src.size_total
            dst.id = src.id
            dst.is_data_reduction_enabled = src.is_data_reduction_enabled
            destination_luns.append(dst)
    return destination_luns
+
def enable_cg_replication(self, cg_name, replication):
    """Enable replication on a consistency group.

    :param cg_name: The name of the source consistency group
    :param replication: Dict of replication parameters
        (destination_cg_name, replication_mode, rpo, replication_type,
        remote_system, destination_pool_name/destination_pool_id)
    :return: True if replication was enabled, False if it was already
             enabled (idempotent no-op)
    """
    try:
        # Validate replication params before touching the array.
        self.validate_cg_replication_params(replication)

        # Get cg instance
        cg_object = self.return_cg_instance(cg_name)

        # Idempotency: nothing to do if replication is already enabled.
        if cg_object.check_cg_is_replicated():
            return False

        # Resolve remote system name and destination pool id.
        self.update_replication_params(replication)

        replication_args_list = {
            'dst_pool_id': replication['destination_pool_id']
        }

        # RPO applies only to asynchronous mode; -1 selects synchronous.
        if replication.get('replication_mode') == 'asynchronous':
            replication_args_list['max_time_out_of_sync'] = \
                replication['rpo']
        else:
            replication_args_list['max_time_out_of_sync'] = -1

        # For remote replication, locate the remote system object on the
        # local array; fail if it is not paired.
        if replication.get('replication_type') == 'remote':
            remote_system_name = replication['remote_system_name']
            for remote_system in self.unity_conn.get_remote_system():
                if remote_system.name == remote_system_name:
                    replication_args_list['remote_system'] = remote_system
                    break
            if 'remote_system' not in replication_args_list:
                errormsg = "Remote system %s is not found" % (remote_system_name)
                LOG.error(errormsg)
                self.module.fail_json(msg=errormsg)

        # Describe the LUNs to be provisioned on the destination.
        replication_args_list['source_luns'] = \
            self.get_destination_cg_luns(cg_object.luns)

        # Destination CG name defaults to "DR_" + source name.
        if replication.get('destination_cg_name') is not None:
            replication_args_list['dst_cg_name'] = \
                replication['destination_cg_name']
        else:
            replication_args_list['dst_cg_name'] = "DR_" + cg_object.name

        # Fix: pass lazy %-args to the logger instead of a tuple so the
        # message is actually formatted.
        LOG.info("Enabling replication to the consistency group %s",
                 cg_object.name)
        cg_object.replicate_cg_with_dst_resource_provisioning(
            **replication_args_list)
        return True
    except Exception as e:
        # Fix: use cg_name here — cg_object may be unbound if the failure
        # happened before it was assigned, which previously masked the
        # real error with a NameError.
        errormsg = "Enabling replication to the consistency group %s failed " \
                   "with error %s" % (cg_name, str(e))
        LOG.error(errormsg)
        self.module.fail_json(msg=errormsg)
+
def disable_cg_replication(self, cg_name):
    """Disable replication on a consistency group.

    :param cg_name: The name of the consistency group
    :return: True if replication was disabled, False if it was not
             enabled in the first place (idempotent no-op)
    """
    try:
        # Get cg instance
        cg_object = self.return_cg_instance(cg_name)

        # Idempotency: nothing to do if replication is not enabled.
        if not cg_object.check_cg_is_replicated():
            return False

        # Fix: pass lazy %-args to the logger instead of a tuple so the
        # message is actually formatted.
        LOG.info("Disabling replication from the consistency group %s",
                 cg_object.name)

        # Deleting every replication session sourced from this CG
        # disables its replication.
        for repl_session in self.unity_conn.get_replication_session(
                src_resource_id=cg_object.id):
            repl_session.delete()
        return True
    except Exception as e:
        # Fix: use cg_name here — cg_object may be unbound if the failure
        # happened before it was assigned, which previously masked the
        # real error with a NameError.
        errormsg = "Disabling replication to the consistency group %s failed " \
                   "with error %s" % (cg_name, str(e))
        LOG.error(errormsg)
        self.module.fail_json(msg=errormsg)
+
    def perform_module_operation(self):
        """
        Perform different actions on consistency group module based on
        parameters chosen in playbook.

        Orchestrates, in order: parameter normalization and validation,
        create/delete of the group, volume add/remove, host map/unmap,
        rename, attribute modification, and replication enable/disable.
        Exits the module via exit_json with the aggregated result dict.
        """
        cg_name = self.module.params['cg_name']
        cg_id = self.module.params['cg_id']
        description = self.module.params['description']
        volumes = self.module.params['volumes']
        snap_schedule = self.module.params['snap_schedule']
        new_cg_name = self.module.params['new_cg_name']
        tiering_policy = self.module.params['tiering_policy']
        vol_state = self.module.params['vol_state']
        hosts = self.module.params['hosts']
        mapping_state = self.module.params['mapping_state']
        replication = self.module.params['replication_params']
        replication_state = self.module.params['replication_state']
        state = self.module.params['state']

        # result is a dictionary that contains changed status and consistency
        # group details
        result = dict(
            changed=False,
            create_cg='',
            modify_cg='',
            rename_cg='',
            add_vols_to_cg='',
            remove_vols_from_cg='',
            delete_cg='',
            add_hosts_to_cg='',
            remove_hosts_from_cg='',
            consistency_group_details={}
        )
        cg_details = self.get_details(cg_id=cg_id, cg_name=cg_name)

        # When only cg_id was supplied, switch to addressing the group by the
        # name reported by the array; later operations take a name.
        if cg_name is None and cg_details:
            cg_id = None
            cg_name = cg_details['name']
        # Normalize volume/host entries and reject invalid ones up front.
        if volumes:
            volumes = self.refine_volumes(volumes)
            self.validate_volumes(volumes)
        if hosts:
            hosts = self.refine_hosts(hosts)
            self.validate_hosts(hosts)

        modified = False

        if cg_details:
            modified = self.is_cg_modified(cg_details)

        # Parameter interdependency checks.
        if vol_state and not volumes:
            self.module.fail_json(msg="Please specify volumes along with vol_state")

        if mapping_state and not hosts:
            self.module.fail_json(msg="Please specify hosts along with mapping_state")

        if replication and replication_state is None:
            self.module.fail_json(msg="Please specify replication_state along with replication_params")

        # Create a new group (state=present, group absent) or delete an
        # existing one (state=absent, group present).
        if state == 'present' and not cg_details:
            if not volumes and tiering_policy:
                self.module.fail_json(msg="The system cannot assign a"
                                          " tiering policy to an empty"
                                          " consistency group")
            if not volumes and hosts:
                self.module.fail_json(msg="The system cannot assign"
                                          " hosts to an empty"
                                          " consistency group")

            if not cg_name:
                msg = "The parameter cg_name length is 0. It is too short." \
                      " The min length is 1."
                self.module.fail_json(msg=msg)

            if new_cg_name:
                self.module.fail_json(msg="Invalid argument, new_cg_name is"
                                          " not required")

            # create_cg also returns fresh details so the later replication
            # and rename branches see the just-created group.
            result['create_cg'], cg_details = self.create_cg(
                cg_name, description, snap_schedule)
        elif state == 'absent' and cg_details:
            # A replicated group or one still holding LUNs cannot be removed.
            if cg_details['cg_replication_enabled']:
                self.module.fail_json(msg="Consistency group cannot be deleted"
                                          " because it is participating"
                                          " in a replication session.")
            if cg_details['luns']:
                self.module.fail_json(msg="Please remove all volumes which"
                                          " are part of consistency group"
                                          " before deleting it.")
            result['delete_cg'] = self.delete_cg(cg_name)

        # Volume membership operations.
        if state == 'present' and vol_state == 'present-in-group' and \
                cg_details and volumes:
            result['add_vols_to_cg'] = self.add_volumes_to_cg(cg_name,
                                                              volumes,
                                                              tiering_policy)
        elif state == 'present' and vol_state == 'absent-in-group' and \
                cg_details and volumes:
            result['remove_vols_from_cg'] = self.remove_volumes_from_cg(
                cg_name, volumes)

        # Host mapping operations.
        if hosts and mapping_state == 'mapped' and \
                cg_details:
            result['add_hosts_to_cg'] = self.map_hosts_to_cg(cg_name, hosts)

        if hosts and mapping_state == 'unmapped' and \
                cg_details:
            result['remove_hosts_from_cg'] = self.unmap_hosts_to_cg(cg_name, hosts)

        # Rename; subsequent operations use the new name.
        if state == 'present' and new_cg_name is not None:
            if not new_cg_name:
                msg = "The parameter new_cg_name length is 0. It is too" \
                      " short. The min length is 1."
                self.module.fail_json(msg=msg)

            if cg_name != new_cg_name:
                result['rename_cg'] = self.rename_cg(cg_name, new_cg_name)
                cg_name = new_cg_name

        # Attribute modification (description/schedule/tiering policy).
        if state == 'present' and cg_details and modified:
            result['modify_cg'] = self.modify_cg(cg_name, description,
                                                 snap_schedule, tiering_policy
                                                 )

        # Replication toggling sets 'changed' directly; the aggregation below
        # only ever raises it to True, so an earlier True is never lost.
        if state == 'present' and cg_details and replication_state is not None:
            if replication_state == 'enable':
                result['changed'] = self.enable_cg_replication(cg_name, replication)
            else:
                result['changed'] = self.disable_cg_replication(cg_name)

        if result['create_cg'] or result['modify_cg'] or result[
            'add_vols_to_cg'] or result['remove_vols_from_cg'] or result[
            'delete_cg'] or result['rename_cg'] or result[
            'add_hosts_to_cg'] or result['remove_hosts_from_cg']:
            result['changed'] = True

        # Fetch the final state of the group for the module output.
        result['consistency_group_details'] = self.get_details(cg_id=cg_id,
                                                               cg_name=cg_name
                                                               )

        self.module.exit_json(**result)
+
+ def validate_destination_pool_info(self, replication):
+ if replication['destination_pool_id'] is not None and replication['destination_pool_name'] is not None:
+ errormsg = "'destination_pool_id' and 'destination_pool_name' is mutually exclusive."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ if replication['destination_pool_id'] is None and replication['destination_pool_name'] is None:
+ errormsg = "Either 'destination_pool_id' or 'destination_pool_name' is required."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_replication_mode(self, replication):
+ if 'replication_mode' in replication and replication['replication_mode'] == 'asynchronous':
+ if replication['rpo'] is None:
+ errormsg = "rpo is required together with 'asynchronous' replication_mode."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ if replication['rpo'] < 5 or replication['rpo'] > 1440:
+ errormsg = "rpo value should be in range of 5 to 1440"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def validate_cg_replication_params(self, replication):
        '''Validate cg replication params.

        Fails the module when replication_params is missing, when the
        destination pool identifiers are inconsistent, when rpo is absent
        or out of range for asynchronous mode, when a remote replication
        has no remote_system, or when destination_cg_name is empty or
        longer than 95 characters.

        :param replication: The replication_params dict from the playbook
        '''
        # Validate replication
        if replication is None:
            errormsg = "Please specify replication_params to enable replication."
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
        else:
            self.validate_destination_pool_info(replication)
            self.validate_replication_mode(replication)
            # Validate replication type
            if replication['replication_type'] == 'remote' and replication['remote_system'] is None:
                errormsg = "remote_system is required together with 'remote' replication_type"
                LOG.error(errormsg)
                self.module.fail_json(msg=errormsg)
            # Validate destination cg name
            if 'destination_cg_name' in replication and replication['destination_cg_name'] is not None:
                dst_cg_name_length = len(replication['destination_cg_name'])
                if dst_cg_name_length == 0 or dst_cg_name_length > 95:
                    errormsg = "destination_cg_name value should be in range of 1 to 95"
                    LOG.error(errormsg)
                    self.module.fail_json(msg=errormsg)
+
+
def get_consistencygroup_parameters():
    """Return the argument spec for the Unity consistency group module.

    Nested option specs are built as named dicts first to keep the
    top-level spec readable.
    """
    volume_options = dict(
        vol_name=dict(type='str'),
        vol_id=dict(type='str')
    )
    host_options = dict(
        host_name=dict(type='str'),
        host_id=dict(type='str')
    )
    remote_system_options = dict(
        remote_system_host=dict(type='str', required=True, no_log=True),
        remote_system_verifycert=dict(type='bool', required=False,
                                      default=True),
        remote_system_username=dict(type='str', required=True),
        remote_system_password=dict(type='str', required=True, no_log=True),
        remote_system_port=dict(type='int', required=False, default=443,
                                no_log=True)
    )
    replication_options = dict(
        destination_cg_name=dict(type='str'),
        replication_mode=dict(type='str', choices=['asynchronous', 'manual'],
                              required=True),
        rpo=dict(type='int'),
        replication_type=dict(type='str', choices=['local', 'remote'],
                              default='local'),
        remote_system=dict(type='dict', options=remote_system_options),
        destination_pool_name=dict(type='str'),
        destination_pool_id=dict(type='str')
    )
    return dict(
        cg_name=dict(required=False, type='str'),
        cg_id=dict(required=False, type='str'),
        description=dict(required=False, type='str'),
        volumes=dict(required=False, type='list', elements='dict',
                     options=volume_options),
        snap_schedule=dict(required=False, type='str'),
        new_cg_name=dict(required=False, type='str'),
        tiering_policy=dict(required=False, type='str', choices=[
            'AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']),
        vol_state=dict(required=False, type='str',
                       choices=['present-in-group', 'absent-in-group']),
        hosts=dict(required=False, type='list', elements='dict',
                   options=host_options),
        mapping_state=dict(required=False, type='str',
                           choices=['mapped', 'unmapped']),
        replication_params=dict(type='dict', options=replication_options),
        replication_state=dict(type='str', choices=['enable', 'disable']),
        state=dict(required=True, type='str', choices=['present', 'absent'])
    )
+
+
def main():
    """Entry point: build the Unity consistency group module object and
    run the operation requested by the playbook parameters."""
    ConsistencyGroup().perform_module_operation()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/filesystem.py b/ansible_collections/dellemc/unity/plugins/modules/filesystem.py
new file mode 100644
index 000000000..b10f85386
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/filesystem.py
@@ -0,0 +1,1906 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing FileSystem on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+
+module: filesystem
+version_added: '1.1.0'
+short_description: Manage filesystem on Unity storage system
+description:
+- Managing filesystem on Unity storage system includes
+ Create new filesystem,
+ Modify snapschedule attribute of filesystem,
+ Modify filesystem attributes,
+ Display filesystem details,
+ Display filesystem snapshots,
+ Display filesystem snapschedule,
+ Delete snapschedule associated with the filesystem,
+ Delete filesystem,
+ Create new filesystem with quota configuration,
+ Enable, modify and disable replication.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Arindam Datta (@dattaarindam) <ansible.team@dell.com>
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+- Spandita Panigrahi (@panigs7) <ansible.team@dell.com>
+
+options:
+ filesystem_name:
+ description:
+ - The name of the filesystem. Mandatory only for the create operation.
+ All the operations are supported through I(filesystem_name).
+ - It is mutually exclusive with I(filesystem_id).
+ type: str
+ filesystem_id:
+ description:
+ - The id of the filesystem.
+ - It can be used only for get, modify, or delete operations.
+ - It is mutually exclusive with I(filesystem_name).
+ type: str
+ pool_name:
+ description:
+ - This is the name of the pool where the filesystem will be created.
+ - Either the I(pool_name) or I(pool_id) must be provided to create a new
+ filesystem.
+ type: str
+ pool_id:
+ description:
+ - This is the ID of the pool where the filesystem will be created.
+ - Either the I(pool_name) or I(pool_id) must be provided to create a new
+ filesystem.
+ type: str
+ size:
+ description:
+ - The size of the filesystem.
+ type: int
+ cap_unit:
+ description:
+ - The unit of the filesystem size. It defaults to C(GB), if not specified.
+ choices: ['GB' , 'TB']
+ type: str
+ nas_server_name:
+ description:
+ - Name of the NAS server on which filesystem will be hosted.
+ type: str
+ nas_server_id:
+ description:
+ - ID of the NAS server on which filesystem will be hosted.
+ type: str
+ supported_protocols:
+ description:
+ - Protocols supported by the file system.
+ - It will be overridden by NAS server configuration if NAS Server is C(Multiprotocol).
+ type: str
+ choices: ['NFS', 'CIFS', 'MULTIPROTOCOL']
+ description:
+ description:
+ - Description about the filesystem.
+ - Description can be removed by passing empty string ("").
+ type: str
+ smb_properties:
+ description:
+ - Advance settings for SMB. It contains optional candidate variables.
+ type: dict
+ suboptions:
+ is_smb_sync_writes_enabled:
+ description:
+ - Indicates whether the synchronous writes option is enabled on the
+ file system.
+ type: bool
+ is_smb_notify_on_access_enabled:
+ description:
+ - Indicates whether notifications of changes to directory file
+ structure are enabled.
+ type: bool
+ is_smb_op_locks_enabled:
+ description:
+ - Indicates whether opportunistic file locking is enabled on the file
+ system.
+ type: bool
+ is_smb_notify_on_write_enabled:
+ description:
+ - Indicates whether file write notifications are enabled on the file
+ system.
+ type: bool
+ smb_notify_on_change_dir_depth:
+ description:
+ - Integer variable, determines the lowest directory level to which
+ the enabled notifications apply.
+ - Minimum value is C(1).
+ type: int
+ data_reduction:
+ description:
+ - Boolean variable, specifies whether or not to enable compression.
+ Compression is supported only for thin filesystem.
+ type: bool
+ is_thin:
+ description:
+ - Boolean variable, specifies whether or not it is a thin filesystem.
+ type: bool
+ access_policy:
+ description:
+ - Access policy of a filesystem.
+ choices: ['NATIVE', 'UNIX', 'WINDOWS']
+ type: str
+ locking_policy:
+ description:
+ - File system locking policies. These policy choices control whether the
+ NFSv4 range locks must be honored.
+ type: str
+ choices: ['ADVISORY', 'MANDATORY']
+ tiering_policy:
+ description:
+ - Tiering policy choices for how the storage resource data will be
+ distributed among the tiers available in the pool.
+ choices: ['AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']
+ type: str
+ quota_config:
+ description:
+ - Configuration for quota management. It contains optional parameters.
+ type: dict
+ suboptions:
+ grace_period:
+ description:
+ - Grace period set in quota configuration after soft limit is reached.
+ - If I(grace_period) is not set during creation of filesystem,
+ it will be set to C(7 days) by default.
+ type: int
+ grace_period_unit:
+ description:
+ - Unit of grace period.
+ - Default unit is C(days).
+ type: str
+ choices: ['minutes', 'hours', 'days']
+ default_hard_limit:
+ description:
+ - Default hard limit for user quotas and tree quotas.
+ - If I(default_hard_limit) is not set while creation of filesystem,
+ it will be set to C(0B) by default.
+ type: int
+ default_soft_limit:
+ description:
+ - Default soft limit for user quotas and tree quotas.
+ - If I(default_soft_limit) is not set while creation of filesystem,
+ it will be set to C(0B) by default.
+ type: int
+ is_user_quota_enabled:
+ description:
+ - Indicates whether the user quota is enabled.
+ - If I(is_user_quota_enabled) is not set while creation of filesystem,
+ it will be set to C(false) by default.
+ - Parameters I(is_user_quota_enabled) and I(quota_policy) are
+ mutually exclusive.
+ type: bool
+ quota_policy:
+ description:
+ - Quota policy set in quota configuration.
+ - If I(quota_policy) is not set while creation of filesystem, it will
+ be set to C(FILE_SIZE) by default.
+ - Parameters I(is_user_quota_enabled) and I(quota_policy) are
+ mutually exclusive.
+ choices: ['FILE_SIZE','BLOCKS']
+ type: str
+ cap_unit:
+ description:
+ - Unit of I(default_soft_limit) and I(default_hard_limit) size.
+ - Default unit is C(GB).
+ choices: ['MB', 'GB', 'TB']
+ type: str
+ state:
+ description:
+ - State variable to determine whether filesystem will exist or not.
+ choices: ['absent', 'present']
+ required: true
+ type: str
+ snap_schedule_name:
+ description:
+ - This is the name of an existing snapshot schedule which is to be associated with the filesystem.
+ - This is mutually exclusive with I(snapshot_schedule_id).
+ type: str
+ snap_schedule_id:
+ description:
+ - This is the id of an existing snapshot schedule which is to be associated with the filesystem.
+ - This is mutually exclusive with I(snapshot_schedule_name).
+ type: str
+ replication_params:
+ description:
+ - Settings required for enabling or modifying replication.
+ type: dict
+ suboptions:
+ replication_name:
+ description:
+ - Name of the replication session.
+ type: str
+ new_replication_name:
+ description:
+ - Replication name to rename the session to.
+ type: str
+ replication_mode:
+ description:
+ - The replication mode.
+ - This is a mandatory field while creating a replication session.
+ type: str
+ choices: ['synchronous', 'asynchronous', 'manual']
+ rpo:
+ description:
+ - Maximum time to wait before the system syncs the source and destination LUNs.
+ - The I(rpo) option should be specified if the I(replication_mode) is C(asynchronous).
+ - The value should be in range of C(5) to C(1440) for C(asynchronous),
+ C(0) for C(synchronous) and C(-1) for C(manual).
+ type: int
+ replication_type:
+ description:
+ - Type of replication.
+ choices: ['local', 'remote']
+ type: str
+ remote_system:
+ description:
+ - Details of remote system to which the replication is being configured.
+ - The I(remote_system) option should be specified if the I(replication_type) is C(remote).
+ type: dict
+ suboptions:
+ remote_system_host:
+ required: true
+ description:
+ - IP or FQDN for remote Unity unisphere Host.
+ type: str
+ remote_system_username:
+ type: str
+ required: true
+ description:
+ - User name of remote Unity unisphere Host.
+ remote_system_password:
+ type: str
+ required: true
+ description:
+ - Password of remote Unity unisphere Host.
+ remote_system_verifycert:
+ type: bool
+ default: true
+ description:
+ - Boolean variable to specify whether or not to validate SSL
+ certificate of remote Unity unisphere Host.
+ - C(true) - Indicates that the SSL certificate should be verified.
+ - C(false) - Indicates that the SSL certificate should not be
+ verified.
+ remote_system_port:
+ description:
+ - Port at which remote Unity unisphere is hosted.
+ type: int
+ default: 443
+ destination_pool_id:
+ type: str
+ description:
+ - ID of pool to allocate destination filesystem.
+ destination_pool_name:
+ type: str
+ description:
+ - Name of pool to allocate destination filesystem.
+ replication_state:
+ description:
+ - State of the replication.
+ choices: ['enable', 'disable']
+ type: str
+
+notes:
+- SMB shares, NFS exports, and snapshots associated with filesystem need
+ to be deleted prior to deleting a filesystem.
+- The I(quota_config) parameter can be used to update default hard limit
+ and soft limit values to limit the maximum space that can be used.
+ By default they both are set to 0 during filesystem
+ creation which means unlimited.
+- The I(check_mode) is not supported.
+"""
+
+EXAMPLES = r"""
+- name: Create FileSystem
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ pool_name: "pool_1"
+ size: 5
+ state: "present"
+
+- name: Create FileSystem with quota configuration
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ pool_name: "pool_1"
+ size: 5
+ quota_config:
+ grace_period: 8
+ grace_period_unit: "days"
+ default_soft_limit: 10
+ is_user_quota_enabled: False
+ state: "present"
+
+- name: Expand FileSystem size
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ size: 10
+ state: "present"
+
+- name: Expand FileSystem size
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ size: 10
+ state: "present"
+
+- name: Modify FileSystem smb_properties
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ smb_properties:
+ is_smb_op_locks_enabled: True
+ smb_notify_on_change_dir_depth: 5
+ is_smb_notify_on_access_enabled: True
+ state: "present"
+
+- name: Modify FileSystem Snap Schedule
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_141"
+ snap_schedule_id: "{{snap_schedule_id}}"
+ state: "{{state_present}}"
+
+- name: Get details of FileSystem using id
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ state: "present"
+
+- name: Delete a FileSystem using id
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ state: "absent"
+
+- name: Enable replication on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_params:
+ replication_name: "test_repl"
+ replication_type: "remote"
+ replication_mode: "asynchronous"
+ rpo: 60
+ remote_system:
+ remote_system_host: '0.1.2.3'
+ remote_system_verifycert: False
+ remote_system_username: 'username'
+ remote_system_password: 'password'
+ destination_pool_name: "pool_test_1"
+ replication_state: "enable"
+ state: "present"
+
+- name: Modify replication on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_params:
+ replication_name: "test_repl"
+ new_replication_name: "test_repl_updated"
+ replication_mode: "asynchronous"
+ rpo: 50
+ replication_state: "enable"
+ state: "present"
+
+- name: Disable replication on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_state: "disable"
+ state: "present"
+
+- name: Disable replication by specifying replication_name on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_params:
+ replication_name: "test_replication"
+ replication_state: "disable"
+ state: "present"
+"""
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+
+filesystem_details:
+ description: Details of the filesystem.
+ returned: When filesystem exists
+ type: dict
+ contains:
+ id:
+ description: The system generated ID given to the filesystem.
+ type: str
+ name:
+ description: Name of the filesystem.
+ type: str
+ description:
+ description: Description about the filesystem.
+ type: str
+ is_data_reduction_enabled:
+ description: Whether or not compression enabled on this
+ filesystem.
+ type: bool
+ size_total_with_unit:
+ description: Size of the filesystem with actual unit.
+ type: str
+ tiering_policy:
+ description: Tiering policy applied to this filesystem.
+ type: str
+ is_cifs_notify_on_access_enabled:
+ description: Indicates whether the system generates a
+ notification when a user accesses the file system.
+ type: bool
+ is_cifs_notify_on_write_enabled:
+ description: Indicates whether the system generates a notification
+ when the file system is written to.
+ type: bool
+ is_cifs_op_locks_enabled:
+ description: Indicates whether opportunistic file locks are enabled
+ for the file system.
+ type: bool
+ is_cifs_sync_writes_enabled:
+ description: Indicates whether the CIFS synchronous writes option
+ is enabled for the file system.
+ type: bool
+ cifs_notify_on_change_dir_depth:
+ description: Indicates the lowest directory level to which the
+ enabled notifications apply, if any.
+ type: int
+ pool:
+ description: The pool in which this filesystem is allocated.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the pool.
+ type: str
+ name:
+ description: The name of the storage pool.
+ type: str
+ nas_server:
+ description: The NAS Server details on which this filesystem is hosted.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the NAS Server.
+ type: str
+ name:
+ description: The name of the NAS Server.
+ type: str
+ snapshots:
+ description: The list of snapshots of this filesystem.
+ type: list
+ contains:
+ id:
+ description: The system ID given to the filesystem
+ snapshot.
+ type: str
+ name:
+ description: The name of the filesystem snapshot.
+ type: str
+ is_thin_enabled:
+ description: Indicates whether thin provisioning is enabled for
+ this filesystem.
+ type: bool
+ snap_schedule_id:
+ description: Indicates the id of the snap schedule associated
+ with the filesystem.
+ type: str
+ snap_schedule_name:
+ description: Indicates the name of the snap schedule associated
+ with the filesystem.
+ type: str
+ quota_config:
+ description: Details of quota configuration of the filesystem
+ created.
+ type: dict
+ contains:
+ grace_period:
+ description: Grace period set in quota configuration
+ after soft limit is reached.
+ type: str
+ default_hard_limit:
+ description: Default hard limit for user quotas
+ and tree quotas.
+ type: int
+ default_soft_limit:
+ description: Default soft limit for user quotas
+ and tree quotas.
+ type: int
+ is_user_quota_enabled:
+ description: Indicates whether the user quota is enabled.
+ type: bool
+ quota_policy:
+ description: Quota policy set in quota configuration.
+ type: str
+ replication_sessions:
+ description: List of replication sessions if replication is enabled.
+ type: dict
+ contains:
+ id:
+ description: ID of replication session
+ type: str
+ name:
+ description: Name of replication session
+ type: str
+ remote_system:
+ description: Remote system
+ type: dict
+ contains:
+ id:
+ description: ID of remote system
+ type: str
+ sample: {
+ "access_policy": "AccessPolicyEnum.UNIX",
+ "cifs_notify_on_change_dir_depth": 512,
+ "cifs_share": null,
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "description": "",
+ "existed": true,
+ "folder_rename_policy": "FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN",
+ "format": "FSFormatEnum.UFS64",
+ "hash": 8735427610152,
+ "health": {
+ "UnityHealth": {
+ "hash": 8735427614928
+ }
+ },
+ "host_io_size": "HostIOSizeEnum.GENERAL_8K",
+ "id": "fs_65916",
+ "is_advanced_dedup_enabled": false,
+ "is_cifs_notify_on_access_enabled": false,
+ "is_cifs_notify_on_write_enabled": false,
+ "is_cifs_op_locks_enabled": false,
+ "is_cifs_sync_writes_enabled": false,
+ "is_data_reduction_enabled": false,
+ "is_read_only": false,
+ "is_smbca": false,
+ "is_thin_enabled": true,
+ "locking_policy": "FSLockingPolicyEnum.MANDATORY",
+ "metadata_size": 11274289152,
+ "metadata_size_allocated": 4294967296,
+ "min_size_allocated": 0,
+ "name": "test_fs",
+ "nas_server": {
+ "id": "nas_18",
+ "name": "test_nas1"
+ },
+ "nfs_share": null,
+ "per_tier_size_used": [
+ 6979321856,
+ 0,
+ 0
+ ],
+ "pool": {
+ "id": "pool_7",
+ "name": "pool 7"
+ },
+ "pool_full_policy": "ResourcePoolFullPolicyEnum.FAIL_WRITES",
+ "quota_config": {
+ "default_hard_limit": "0B",
+ "default_soft_limit": "0B",
+ "grace_period": "7.0 days",
+ "id": "quotaconfig_171798760421_0",
+ "is_user_quota_enabled": false,
+ "quota_policy": "QuotaPolicyEnum.FILE_SIZE"
+ },
+ "replication_sessions": {
+ "current_transfer_est_remain_time": 0,
+ "id": "***",
+ "last_sync_time": "2022-05-12 11:20:38+00:00",
+ "local_role": "ReplicationSessionReplicationRoleEnum.SOURCE",
+ "max_time_out_of_sync": 60,
+ "members": null,
+ "name": "local_repl_new",
+ "network_status": "ReplicationSessionNetworkStatusEnum.OK",
+ "remote_system": {
+ "UnityRemoteSystem": {
+ "hash": 8735426929707
+ }
+ },
+ "replication_resource_type": "ReplicationEndpointResourceTypeEnum.FILESYSTEM",
+ "src_resource_id": "res_66444",
+ "src_status": "ReplicationSessionStatusEnum.OK",
+ "status": "ReplicationOpStatusEnum.AUTO_SYNC_CONFIGURED",
+ "sync_progress": 0,
+ "sync_state": "ReplicationSessionSyncStateEnum.IDLE"
+ },
+ "size_allocated": 283148288,
+ "size_allocated_total": 4578148352,
+ "size_preallocated": 2401173504,
+ "size_total": 10737418240,
+ "size_total_with_unit": "10.0 GB",
+ "size_used": 1620312064,
+ "snap_count": 2,
+ "snaps_size": 21474869248,
+ "snaps_size_allocated": 32768,
+ "snapshots": [],
+ "supported_protocols": "FSSupportedProtocolEnum.NFS",
+ "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH",
+ "type": "FilesystemTypeEnum.FILESYSTEM"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('filesystem')
+
+application_type = "Ansible/1.6.0"
+
+
+class Filesystem(object):
+ """Class with FileSystem operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_filesystem_parameters())
+
+ mutually_exclusive = [['filesystem_name', 'filesystem_id'],
+ ['pool_name', 'pool_id'],
+ ['nas_server_name', 'nas_server_id'],
+ ['snap_schedule_name', 'snap_schedule_id']]
+
+ required_one_of = [['filesystem_name', 'filesystem_id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of)
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+
    def get_filesystem(self, name=None, id=None, obj_nas_server=None):
        """Get the details of a FileSystem.
        :param name: The name of the filesystem
        :param id: The id of the filesystem
        :param obj_nas_server: NAS Server object instance
        :return: instance of the respective filesystem if it exists,
                 None when it cannot be found; fails the module on other
                 errors.
        """

        id_or_name = id if id else name
        errormsg = "Failed to get the filesystem {0} with error {1}"

        try:
            obj_fs = None
            if id:
                # Lookup by id; the NAS server scope is optional here.
                if obj_nas_server:
                    obj_fs = self.unity_conn.get_filesystem(
                        _id=id,
                        nas_server=obj_nas_server)
                else:
                    obj_fs = self.unity_conn.get_filesystem(_id=id)

                # By-id lookups return a placeholder object whose .existed
                # flag must be checked. If it does not exist, this branch
                # falls through and the method implicitly returns None.
                if obj_fs and obj_fs.existed:
                    LOG.info("Successfully got the filesystem "
                             "object %s ", obj_fs)
                    return obj_fs
            elif name:
                # By-name lookups are ambiguous without a NAS server scope.
                if not obj_nas_server:
                    err_msg = "NAS Server is required to get the FileSystem"
                    LOG.error(err_msg)
                    self.module.fail_json(msg=err_msg)

                obj_fs = self.unity_conn.get_filesystem(
                    name=name,
                    nas_server=obj_nas_server)
                # NOTE(review): no .existed check here — presumably the
                # by-name lookup raises UnityResourceNotFoundError (handled
                # below) when absent; confirm against storops behavior.
                if obj_fs:
                    LOG.info(
                        "Successfully got the filesystem object %s ", obj_fs)
                    return obj_fs
            else:
                # Neither name nor id supplied.
                LOG.info("Failed to get the filesystem %s", id_or_name)
                return None

        except utils.HttpError as e:
            # 401 gets a clearer credentials message; everything else is
            # reported verbatim.
            if e.http_status == 401:
                cred_err = "Incorrect username or password , {0}".format(
                    e.message)
                msg = errormsg.format(id_or_name, cred_err)
                self.module.fail_json(msg=msg)
            else:
                msg = errormsg.format(id_or_name, str(e))
                self.module.fail_json(msg=msg)

        except utils.UnityResourceNotFoundError as e:
            # Not-found is a normal outcome for callers probing existence.
            msg = errormsg.format(id_or_name, str(e))
            LOG.error(msg)
            return None

        except Exception as e:
            msg = errormsg.format(id_or_name, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)
+
+ def get_nas_server(self, name=None, id=None):
+ """Get the instance of a NAS Server.
+ :param name: The NAS Server name
+ :param id: The NAS Server id
+ :return: instance of the respective NAS Server if exists.
+ """
+
+ errormsg = "Failed to get the NAS Server {0} with error {1}"
+ id_or_name = name if name else id
+
+ try:
+ obj_nas = self.unity_conn.get_nas_server(_id=id, name=name)
+ if id and obj_nas.existed:
+ LOG.info("Successfully got the nas server object %s",
+ obj_nas)
+ return obj_nas
+ elif name:
+ LOG.info("Successfully got the nas server object %s ",
+ obj_nas)
+ return obj_nas
+ else:
+ msg = "Failed to get the nas server with {0}".format(
+ id_or_name)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = errormsg.format(name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_pool(self, pool_name=None, pool_id=None):
+ """Get the instance of a pool.
+ :param pool_name: The name of the pool
+ :param pool_id: The id of the pool
+ :return: Dict containing pool details if exists
+ """
+
+ id_or_name = pool_id if pool_id else pool_name
+ errormsg = "Failed to get the pool {0} with error {1}"
+
+ try:
+ obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id)
+
+ if pool_id and obj_pool.existed:
+ LOG.info("Successfully got the pool object %s",
+ obj_pool)
+ return obj_pool
+ if pool_name:
+ LOG.info("Successfully got pool %s", obj_pool)
+ return obj_pool
+ else:
+ msg = "Failed to get the pool with {0}".format(
+ id_or_name)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_tiering_policy_enum(self, tiering_policy):
+ """Get the tiering_policy enum.
+ :param tiering_policy: The tiering_policy string
+ :return: tiering_policy enum
+ """
+
+ if tiering_policy in utils.TieringPolicyEnum.__members__:
+ return utils.TieringPolicyEnum[tiering_policy]
+ else:
+ errormsg = "Invalid choice {0} for tiering policy".format(
+ tiering_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_supported_protocol_enum(self, supported_protocol):
+ """Get the supported_protocol enum.
+ :param supported_protocol: The supported_protocol string
+ :return: supported_protocol enum
+ """
+
+ supported_protocol = "MULTI_PROTOCOL" if \
+ supported_protocol == "MULTIPROTOCOL" else supported_protocol
+ if supported_protocol in utils.FSSupportedProtocolEnum.__members__:
+ return utils.FSSupportedProtocolEnum[supported_protocol]
+ else:
+ errormsg = "Invalid choice {0} for supported_protocol".format(
+ supported_protocol)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_locking_policy_enum(self, locking_policy):
+ """Get the locking_policy enum.
+ :param locking_policy: The locking_policy string
+ :return: locking_policy enum
+ """
+ if locking_policy in utils.FSLockingPolicyEnum.__members__:
+ return utils.FSLockingPolicyEnum[locking_policy]
+ else:
+ errormsg = "Invalid choice {0} for locking_policy".format(
+ locking_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_access_policy_enum(self, access_policy):
+ """Get the access_policy enum.
+ :param access_policy: The access_policy string
+ :return: access_policy enum
+ """
+ if access_policy in utils.AccessPolicyEnum.__members__:
+ return utils.AccessPolicyEnum[access_policy]
+ else:
+ errormsg = "Invalid choice {0} for access_policy".format(
+ access_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def create_filesystem(self, name, obj_pool, obj_nas_server, size):
        """Create a FileSystem.
        :param name: Name of the FileSystem
        :param obj_pool: Storage Pool obj instance
        :param obj_nas_server: NAS Server obj instance
        :param size: Total size of a filesystem in bytes
        :return: FileSystem object on successful creation
        """
        try:

            # Optional creation attributes come straight from the task
            # parameters; unset ones are passed as None (presumably the
            # SDK then applies its own defaults — confirm).
            supported_protocol = self.module.params['supported_protocols']
            supported_protocol = self.get_supported_protocol_enum(
                supported_protocol) if supported_protocol else None
            is_thin = self.module.params['is_thin']

            tiering_policy = self.module.params['tiering_policy']
            tiering_policy = self.get_tiering_policy_enum(tiering_policy) \
                if tiering_policy else None

            obj_fs = utils.UnityFileSystem.create(
                self.unity_conn._cli,
                pool=obj_pool,
                nas_server=obj_nas_server,
                name=name,
                size=size,
                proto=supported_protocol,
                is_thin=is_thin,
                tiering_policy=tiering_policy)

            LOG.info("Successfully created file system , %s", obj_fs)
            return obj_fs

        except Exception as e:
            # Any SDK failure is fatal for the task.
            errormsg = "Create filesystem {0} operation failed" \
                       " with error {1}".format(name, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
    def delete_filesystem(self, id):
        """Delete a FileSystem.
        :param id: The id of the filesystem to be deleted
        :return: True on successful deletion; fails the task otherwise
        """

        try:
            obj_fs = self.get_filesystem(id=id)
            obj_fs_dict = obj_fs._get_properties()
            # Refuse to delete while SMB shares or NFS exports still
            # reference the filesystem, so active clients are not broken.
            if obj_fs_dict['cifs_share'] is not None:
                errormsg = "The Filesystem has SMB Shares. Hence deleting " \
                           "this filesystem is not safe."
                LOG.error(errormsg)
                self.module.fail_json(msg=errormsg)
            if obj_fs_dict['nfs_share'] is not None:
                errormsg = "The FileSystem has NFS Exports. Hence deleting " \
                           "this filesystem is not safe."
                LOG.error(errormsg)
                self.module.fail_json(msg=errormsg)
            obj_fs.delete()
            return True

        except Exception as e:
            errormsg = "Delete operation of FileSystem id:{0} " \
                       "failed with error {1}".format(id,
                                                      str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
    def is_modify_required(self, obj_fs, cap_unit):
        """Checks if any modify required for filesystem attributes.

        Compares every supported task parameter against the current
        filesystem state and collects only the attributes that differ.
        :param obj_fs: filesystem instance
        :param cap_unit: capacity unit for the 'size' parameter
        :return: dict of attributes to update, or None if nothing differs
        """
        try:
            to_update = {}
            # Refresh the object so comparisons use current array state.
            obj_fs = obj_fs.update()
            description = self.module.params['description']

            if description is not None and description != obj_fs.description:
                to_update.update({'description': description})

            size = self.module.params['size']
            if size and cap_unit:
                size_byte = int(utils.get_size_bytes(size, cap_unit))
                # Shrinking is not supported; only grow the filesystem.
                if size_byte < obj_fs.size_total:
                    self.module.fail_json(msg="Filesystem size can be "
                                              "expanded only")
                elif size_byte > obj_fs.size_total:
                    to_update.update({'size': size_byte})

            tiering_policy = self.module.params['tiering_policy']
            if tiering_policy and self.get_tiering_policy_enum(
                    tiering_policy) != obj_fs.tiering_policy:
                to_update.update({'tiering_policy':
                                  self.get_tiering_policy_enum(
                                      tiering_policy)})

            is_thin = self.module.params['is_thin']
            if is_thin is not None and is_thin != obj_fs.is_thin_enabled:
                to_update.update({'is_thin': is_thin})

            data_reduction = self.module.params['data_reduction']
            if data_reduction is not None and \
                    data_reduction != obj_fs.is_data_reduction_enabled:
                to_update.update({'is_compression': data_reduction})

            access_policy = self.module.params['access_policy']
            if access_policy and self.get_access_policy_enum(
                    access_policy) != obj_fs.access_policy:
                to_update.update({'access_policy':
                                  self.get_access_policy_enum(access_policy)})

            locking_policy = self.module.params['locking_policy']
            if locking_policy and self.get_locking_policy_enum(
                    locking_policy) != obj_fs.locking_policy:
                to_update.update({'locking_policy':
                                  self.get_locking_policy_enum(
                                      locking_policy)})

            snap_sch = obj_fs.storage_resource.snap_schedule

            # self.snap_sch_id semantics: None = parameter not given,
            # "" = detach/resume schedule, otherwise a schedule id to attach.
            if self.snap_sch_id is not None:
                if self.snap_sch_id == "":
                    if snap_sch and snap_sch.id != self.snap_sch_id:
                        to_update.update({'is_snap_schedule_paused': False})
                elif snap_sch is None or snap_sch.id != self.snap_sch_id:
                    to_update.update({'snap_sch_id': self.snap_sch_id})

            smb_properties = self.module.params['smb_properties']
            if smb_properties:
                # Each advanced SMB flag is optional; only compare the
                # ones actually supplied by the user.
                sync_writes_enabled = \
                    smb_properties['is_smb_sync_writes_enabled']
                oplocks_enabled = \
                    smb_properties['is_smb_op_locks_enabled']
                notify_on_write = \
                    smb_properties['is_smb_notify_on_write_enabled']
                notify_on_access = \
                    smb_properties['is_smb_notify_on_access_enabled']
                notify_on_change_dir_depth = \
                    smb_properties['smb_notify_on_change_dir_depth']

                if sync_writes_enabled is not None and \
                        sync_writes_enabled != obj_fs.is_cifs_sync_writes_enabled:
                    to_update.update(
                        {'is_cifs_sync_writes_enabled': sync_writes_enabled})

                if oplocks_enabled is not None and \
                        oplocks_enabled != obj_fs.is_cifs_op_locks_enabled:
                    to_update.update(
                        {'is_cifs_op_locks_enabled': oplocks_enabled})

                if notify_on_write is not None and \
                        notify_on_write != \
                        obj_fs.is_cifs_notify_on_write_enabled:
                    to_update.update(
                        {'is_cifs_notify_on_write_enabled': notify_on_write})

                if notify_on_access is not None and \
                        notify_on_access != \
                        obj_fs.is_cifs_notify_on_access_enabled:
                    to_update.update(
                        {'is_cifs_notify_on_access_enabled':
                         notify_on_access})

                if notify_on_change_dir_depth is not None and \
                        notify_on_change_dir_depth != \
                        obj_fs.cifs_notify_on_change_dir_depth:
                    to_update.update(
                        {'cifs_notify_on_change_dir_depth':
                         notify_on_change_dir_depth})
            if len(to_update) > 0:
                return to_update
            else:
                return None

        except Exception as e:
            errormsg = "Failed to determine if FileSystem id: {0}" \
                       " modification required with error {1}".format(obj_fs.id,
                                                                      str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
    def modify_filesystem(self, update_dict, obj_fs):
        """Modify attributes of a filesystem instance.

        Splits update_dict into advanced-CIFS parameters, plain
        filesystem parameters and snapshot-schedule parameters, then
        issues a single modify call. Fails the task on any error.
        :param update_dict: dict of attributes to change (from
            is_modify_required)
        :param obj_fs: filesystem instance to modify
        """
        try:
            # Keys that must travel inside cifs_fs_parameters rather than
            # as top-level modify() arguments.
            adv_smb_params = [
                'is_cifs_sync_writes_enabled',
                'is_cifs_op_locks_enabled',
                'is_cifs_notify_on_write_enabled',
                'is_cifs_notify_on_access_enabled',
                'cifs_notify_on_change_dir_depth']

            cifs_fs_payload = {}
            fs_update_payload = {}

            for smb_param in adv_smb_params:
                if smb_param in update_dict.keys():
                    cifs_fs_payload.update({smb_param: update_dict[smb_param]})

            LOG.debug("CIFS Modify Payload: %s", cifs_fs_payload)

            cifs_fs_parameters = obj_fs.prepare_cifs_fs_parameters(
                **cifs_fs_payload)

            fs_update_params = [
                'size',
                'is_thin',
                'tiering_policy',
                'is_compression',
                'access_policy',
                'locking_policy',
                'description',
                'cifs_fs_parameters']

            for fs_param in fs_update_params:
                if fs_param in update_dict.keys():
                    fs_update_payload.update({fs_param: update_dict[fs_param]})

            if cifs_fs_parameters:
                fs_update_payload.update(
                    {'cifs_fs_parameters': cifs_fs_parameters})

            # Attach a snapshot schedule, or resume a paused one.
            # NOTE(review): is_modify_required only ever stores
            # is_snap_schedule_paused=False, so False is hard-coded here.
            if "snap_sch_id" in update_dict.keys():
                fs_update_payload.update(
                    {'snap_schedule_parameters': {'snapSchedule':
                                                  {'id': update_dict.get('snap_sch_id')}
                                                  }}
                )
            elif "is_snap_schedule_paused" in update_dict.keys():
                fs_update_payload.update(
                    {'snap_schedule_parameters': {'isSnapSchedulePaused': False}
                     })

            # Refresh before modifying so the request is built against
            # current array state.
            obj_fs = obj_fs.update()
            resp = obj_fs.modify(**fs_update_payload)
            LOG.info("Successfully modified the FS with response %s", resp)

        except Exception as e:
            errormsg = "Failed to modify FileSystem instance id: {0}" \
                       " with error {1}".format(obj_fs.id, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
    def get_filesystem_display_attributes(self, obj_fs):
        """Build the filesystem fact dict returned to the playbook.

        Augments the raw property dump with human-readable size, pool /
        NAS-server summaries, snapshots, snapshot schedule, quota config
        and replication sessions.
        :param obj_fs: filesystem instance
        :return: filesystem dict to display
        """
        try:
            # Refresh so the displayed state is current.
            obj_fs = obj_fs.update()
            filesystem_details = obj_fs._get_properties()
            filesystem_details['size_total_with_unit'] = utils. \
                convert_size_with_unit(int(filesystem_details['size_total']))
            if obj_fs.pool:
                filesystem_details.update(
                    {'pool': {'name': obj_fs.pool.name,
                              'id': obj_fs.pool.id}})
            if obj_fs.nas_server:
                filesystem_details.update(
                    {'nas_server': {'name': obj_fs.nas_server.name,
                                    'id': obj_fs.nas_server.id}})
            snap_list = []
            if obj_fs.has_snap():
                for snap in obj_fs.snapshots:
                    d = {'name': snap.name, 'id': snap.id}
                    snap_list.append(d)
            filesystem_details['snapshots'] = snap_list

            if obj_fs.storage_resource.snap_schedule:
                filesystem_details['snap_schedule_id'] = obj_fs.storage_resource.snap_schedule.id
                filesystem_details['snap_schedule_name'] = obj_fs.storage_resource.snap_schedule.name

            quota_config_obj = self.get_quota_config_details(obj_fs)

            if quota_config_obj:

                # Limits are shown with units, grace period in
                # days/hours/minutes.
                hard_limit = utils.convert_size_with_unit(
                    quota_config_obj.default_hard_limit)
                soft_limit = utils.convert_size_with_unit(
                    quota_config_obj.default_soft_limit)
                grace_period = get_time_with_unit(
                    quota_config_obj.grace_period)

                filesystem_details.update({'quota_config':
                                           {'id': quota_config_obj.id,
                                            'default_hard_limit': hard_limit,
                                            'default_soft_limit': soft_limit,
                                            'is_user_quota_enabled':
                                                quota_config_obj.is_user_quota_enabled,
                                            'quota_policy': quota_config_obj._get_properties()[
                                                'quota_policy'],
                                            'grace_period': grace_period}
                                           })
            filesystem_details['replication_sessions'] = []
            fs_repl_sessions = self.get_replication_session(obj_fs)
            if fs_repl_sessions:
                filesystem_details['replication_sessions'] = \
                    fs_repl_sessions._get_properties()
            return filesystem_details

        except Exception as e:
            errormsg = "Failed to display the filesystem {0} with " \
                       "error {1}".format(obj_fs.name, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
+ def validate_input_string(self):
+ """ validates the input string checks if it is empty string """
+ invalid_string = ""
+ try:
+ for key in self.module.params:
+ val = self.module.params[key]
+ if key == "description" or key == "snap_schedule_name" \
+ or key == "snap_schedule_id":
+ continue
+ if isinstance(val, str) \
+ and val == invalid_string:
+ errmsg = 'Invalid input parameter "" for {0}'.format(
+ key)
+ self.module.fail_json(msg=errmsg)
+ if self.module.params['replication_params'] and self.module.params['replication_state'] is None:
+ self.module.fail_json(msg="Please specify replication_state along with replication_params")
+ except Exception as e:
+ errormsg = "Failed to validate the module param with " \
+ "error {0}".format(str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def resolve_to_snapschedule_id(self, params):
+ """ Get snapshot id for a give snap schedule name
+ :param params: snap schedule name or id
+ :return: snap schedule id after validation
+ """
+
+ try:
+ snap_sch_id = None
+ snapshot_schedule = {}
+ if params["name"]:
+ snapshot_schedule = utils.UnitySnapScheduleList.get(self.unity_conn._cli, name=params["name"])
+ elif params["id"]:
+ snapshot_schedule = utils.UnitySnapScheduleList.get(self.unity_conn._cli, id=params["id"])
+
+ if snapshot_schedule:
+ snap_sch_id = snapshot_schedule.id[0]
+
+ if not snap_sch_id:
+ errormsg = "Failed to find the snapshot schedule id against given name " \
+ "or id: {0}".format(params["name"]), (params["id"])
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ return snap_sch_id
+
+ except Exception as e:
+ errormsg = "Failed to find the snapshot schedules with " \
+ "error {0}".format(str(e))
+
+ def get_quota_config_details(self, obj_fs):
+ """
+ Get the quota config ID mapped to the filesystem
+ :param obj_fs: Filesystem instance
+ :return: Quota config object if exists else None
+ """
+ try:
+ all_quota_config = self.unity_conn.get_quota_config(filesystem=obj_fs)
+ fs_id = obj_fs.id
+
+ if len(all_quota_config) == 0:
+ LOG.error("The quota_config object for new filesystem "
+ "is not updated yet.")
+ return None
+
+ for quota_config in range(len(all_quota_config)):
+ if fs_id and all_quota_config[quota_config].filesystem.id == fs_id and \
+ not all_quota_config[quota_config].tree_quota:
+ msg = "Quota config id for filesystem %s is %s" \
+ % (fs_id, all_quota_config[quota_config].id)
+ LOG.info(msg)
+ return all_quota_config[quota_config]
+
+ except Exception as e:
+ errormsg = "Failed to fetch quota config for filesystem {0} " \
+ " with error {1}".format(fs_id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def modify_quota_config(self, quota_config_obj, quota_config_params):
        """
        Modify default quota config settings of newly created filesystem.
        The default setting of quota config after filesystem creation is:
        default_soft_limit and default_hard_limit are 0,
        is_user_quota_enabled is false,
        grace_period is 7 days and,
        quota_policy is FILE_SIZE.
        :param quota_config_obj: Quota config instance
        :param quota_config_params: Quota config parameters to be modified
        :return: Boolean whether quota config is modified
        """

        if quota_config_params:
            soft_limit = quota_config_params['default_soft_limit']
            hard_limit = quota_config_params['default_hard_limit']
            is_user_quota_enabled = quota_config_params['is_user_quota_enabled']
            quota_policy = quota_config_params['quota_policy']
            grace_period = quota_config_params['grace_period']
            cap_unit = quota_config_params['cap_unit']
            gp_unit = quota_config_params['grace_period_unit']

            # For each optional parameter, fall back to the current value
            # so the "is modify required" comparison below is uniform.
            if soft_limit:
                soft_limit_in_bytes = utils.get_size_bytes(soft_limit, cap_unit)
            else:
                soft_limit_in_bytes = quota_config_obj.default_soft_limit

            if hard_limit:
                hard_limit_in_bytes = utils.get_size_bytes(hard_limit, cap_unit)
            else:
                hard_limit_in_bytes = quota_config_obj.default_hard_limit

            if grace_period:
                grace_period_in_sec = get_time_in_seconds(grace_period, gp_unit)
            else:
                grace_period_in_sec = quota_config_obj.grace_period

            policy_enum = None
            policy_enum_val = None
            if quota_policy:
                if utils.QuotaPolicyEnum[quota_policy]:
                    policy_enum = utils.QuotaPolicyEnum[quota_policy]
                    # The REST payload needs the numeric enum value, not
                    # the enum member itself.
                    policy_enum_val = \
                        utils.QuotaPolicyEnum[quota_policy]._get_properties()['value']
                else:
                    errormsg = "Invalid choice {0} for quota policy".format(
                        quota_policy)
                    LOG.error(errormsg)
                    self.module.fail_json(msg=errormsg)

            # Verify if modify is required. If not required, return False
            if quota_config_obj.default_hard_limit == hard_limit_in_bytes and \
                    quota_config_obj.default_soft_limit == soft_limit_in_bytes and \
                    quota_config_obj.grace_period == grace_period_in_sec and \
                    ((quota_policy is not None and
                      quota_config_obj.quota_policy == policy_enum) or
                     quota_policy is None) and \
                    (is_user_quota_enabled is None or
                     (is_user_quota_enabled is not None and
                      is_user_quota_enabled == quota_config_obj.is_user_quota_enabled)):
                return False

            try:
                resp = self.unity_conn.modify_quota_config(
                    quota_config_id=quota_config_obj.id,
                    grace_period=grace_period_in_sec,
                    default_hard_limit=hard_limit_in_bytes,
                    default_soft_limit=soft_limit_in_bytes,
                    is_user_quota_enabled=is_user_quota_enabled,
                    quota_policy=policy_enum_val)
                LOG.info("Successfully modified the quota config with response %s", resp)
                return True

            except Exception as e:
                errormsg = "Failed to modify quota config for filesystem {0} " \
                           " with error {1}".format(quota_config_obj.filesystem.id, str(e))
                LOG.error(errormsg)
                self.module.fail_json(msg=errormsg)
+
    def update_replication_params(self, replication_params):
        '''Update replication params in place.

        For remote replication, opens a second Unisphere connection to
        the remote system and records its name; resolves
        destination_pool_name to destination_pool_id on whichever system
        (remote or local) will own the destination.
        '''
        try:
            # Treat the session as remote either when explicitly requested
            # or when remote_system details are supplied without a type.
            if replication_params['replication_type'] == 'remote' or \
                    (replication_params['replication_type'] is None and
                     replication_params['remote_system']):
                connection_params = {
                    'unispherehost': replication_params['remote_system']['remote_system_host'],
                    'username': replication_params['remote_system']['remote_system_username'],
                    'password': replication_params['remote_system']['remote_system_password'],
                    'validate_certs': replication_params['remote_system']['remote_system_verifycert'],
                    'port': replication_params['remote_system']['remote_system_port']
                }
                # 'application_type' is a module-level constant defined
                # elsewhere in this file.
                remote_system_conn = utils.get_unity_unisphere_connection(
                    connection_params, application_type)
                replication_params['remote_system_name'] = remote_system_conn.name
                if replication_params['destination_pool_name'] is not None:
                    pool_object = \
                        remote_system_conn.get_pool(name=replication_params['destination_pool_name'])
                    replication_params['destination_pool_id'] = pool_object.id
            else:
                if replication_params['destination_pool_name'] is not None:
                    pool_object = \
                        self.unity_conn.get_pool(name=replication_params['destination_pool_name'])
                    replication_params['destination_pool_id'] = pool_object.id
        except Exception as e:
            errormsg = "Updating replication params failed" \
                       " with error %s" % str(e)
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
+ def validate_rpo(self, replication_params):
+ ''' Validates rpo based on replication mode '''
+ if replication_params['replication_mode'] == 'asynchronous' and \
+ replication_params['rpo'] is None:
+ errormsg = "rpo is required together with 'asynchronous' replication_mode."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ rpo, replication_mode = replication_params['rpo'], replication_params[
+ 'replication_mode']
+
+ if rpo and replication_mode:
+
+ rpo_criteria = {
+ "asynchronous": lambda n: 5 <= n <= 1440,
+ "synchronous": lambda n: n == 0,
+ "manual": lambda n: n == -1
+ }
+
+ if rpo and not rpo_criteria[replication_mode](rpo):
+ errormsg = f"Invalid rpo value - {rpo} for " \
+ f"{replication_mode} replication mode."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_replication_params(self, replication_params):
+ ''' Validate replication params '''
+ if not replication_params:
+ errormsg = "Please specify replication_params to enable replication."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ if replication_params['destination_pool_id'] is not None and \
+ replication_params['destination_pool_name'] is not None:
+ errormsg = "'destination_pool_id' and 'destination_pool_name' is mutually exclusive."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ self.validate_rpo(replication_params)
+ # Validate replication type
+ if replication_params['replication_type'] == 'remote' and replication_params['remote_system'] is None:
+ errormsg = "Remote_system is required together with 'remote' replication_type"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_create_replication_params(self, replication_params):
+ ''' Validate replication params '''
+
+ if replication_params['destination_pool_id'] is None and \
+ replication_params['destination_pool_name'] is None:
+ errormsg = "Either 'destination_pool_id' or 'destination_pool_name' is required to enable replication."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ keys = ['replication_mode', 'replication_type']
+ for key in keys:
+ if replication_params[key] is None:
+ errormsg = "Please specify %s to enable replication." % key
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def modify_replication_session(self, obj_fs, repl_session, replication_params):
        """ Modify the replication session
        :param: obj_fs: Filesystem object
        :param: repl_session: Replication session to be modified
        :param: replication_params: Module input params
        :return: True if modification is successful, False if nothing
            needed changing
        """
        try:
            LOG.info("Modifying replication session of filesystem %s", obj_fs.name)
            modify_payload = {}
            # Map mode to the rpo it implies; explicit rpo wins only for
            # asynchronous mode.
            # NOTE(review): if replication_mode is 'asynchronous' and rpo
            # is None, 'rpo' stays unbound and the comparison below would
            # raise; validate_rpo() upstream appears to prevent this —
            # confirm.
            if replication_params['replication_mode']:
                if replication_params['replication_mode'] == 'manual':
                    rpo = -1
                elif replication_params['replication_mode'] == 'synchronous':
                    rpo = 0
                elif replication_params['rpo']:
                    rpo = replication_params['rpo']
            name = repl_session.name
            if replication_params['new_replication_name'] and \
                    name != replication_params['new_replication_name']:
                name = replication_params['new_replication_name']

            if repl_session.name != name:
                modify_payload['name'] = name
            if ((replication_params['replication_mode'] or replication_params['rpo']) and
                    repl_session.max_time_out_of_sync != rpo):
                modify_payload['max_time_out_of_sync'] = rpo

            if modify_payload:
                repl_session.modify(**modify_payload)
                return True

            return False
        except Exception as e:
            errormsg = "Modifying replication session failed with error %s" % e
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
    def enable_replication(self, obj_fs, replication_params):
        """ Enable the replication session
        :param: obj_fs: Filesystem object
        :param: replication_params: Module input params
        :return: True if enabling (or modifying an existing) replication
            session is successful
        """
        try:
            self.validate_replication_params(replication_params)
            self.update_replication_params(replication_params)

            # If a matching session already exists, this becomes a modify
            # operation instead of a create.
            repl_session = \
                self.get_replication_session_on_filter(obj_fs, replication_params, "modify")
            if repl_session:
                return self.modify_replication_session(obj_fs, repl_session, replication_params)

            self.validate_create_replication_params(replication_params)
            replication_args_list = get_replication_args_list(replication_params)
            # Resolve the remote system name (set by
            # update_replication_params) to its object on this array.
            if 'remote_system_name' in replication_params:
                remote_system_name = replication_params['remote_system_name']
                remote_system_list = self.unity_conn.get_remote_system()
                for remote_system in remote_system_list:
                    if remote_system.name == remote_system_name:
                        replication_args_list['remote_system'] = remote_system
                        break
                if 'remote_system' not in replication_args_list.keys():
                    errormsg = "Remote system %s is not found" % (remote_system_name)
                    LOG.error(errormsg)
                    self.module.fail_json(msg=errormsg)

            LOG.info("Enabling replication to the filesystem %s", obj_fs.name)
            obj_fs.replicate_with_dst_resource_provisioning(**replication_args_list)
            return True
        except Exception as e:
            errormsg = "Enabling replication to the filesystem %s failed " \
                       "with error %s" % (obj_fs.name, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
+ def disable_replication(self, obj_fs, replication_params):
+ """ Remove replication from the filesystem
+ :param: replication_params: Module input params
+ :return: True if disabling replication is successful
+ """
+ try:
+ LOG.info(("Disabling replication on the filesystem %s", obj_fs.name))
+ if replication_params:
+ self.update_replication_params(replication_params)
+ repl_session = \
+ self.get_replication_session_on_filter(obj_fs, replication_params, "delete")
+ if repl_session:
+ repl_session.delete()
+ return True
+ return False
+ except Exception as e:
+ errormsg = "Disabling replication on the filesystem %s failed " \
+ "with error %s" % (obj_fs.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def get_replication_session_on_filter(self, obj_fs, replication_params, action):
        """Find the replication session matching the user's filters.

        Filters by remote system name when remote_system was supplied,
        otherwise by replication_name, otherwise returns the single
        session (or fails in get_replication_session when ambiguous).
        :param obj_fs: Filesystem object
        :param replication_params: Module input params (may be None)
        :param action: 'modify' or 'delete', used in the ambiguity error
        :return: Replication session or None
        """
        if replication_params and replication_params['remote_system']:
            repl_session = \
                self.get_replication_session(obj_fs, filter_key="remote_system_name",
                                             replication_params=replication_params)
        elif replication_params and replication_params['replication_name']:
            repl_session = \
                self.get_replication_session(obj_fs, filter_key="name",
                                             name=replication_params['replication_name'])
        else:
            repl_session = self.get_replication_session(obj_fs, action=action)
        # A 'local' replication request must not match a session whose
        # remote end is a different array.
        if repl_session and action and replication_params and \
                replication_params['replication_type'] == 'local' and \
                repl_session.remote_system.name != self.unity_conn.name:
            return None

        return repl_session
+
+ def get_replication_session(self, obj_fs, filter_key=None, replication_params=None, name=None, action=None):
+ """ Retrieves the replication sessions configured for the filesystem
+ :param: obj_fs: Filesystem object
+ :param: filter_key: Key to filter replication sessions
+ :param: replication_params: Module input params
+ :param: name: Replication session name
+ :param: action: Specifies modify or delete action on replication session
+ :return: Replication session details
+ """
+ try:
+ repl_session = self.unity_conn.get_replication_session(src_resource_id=obj_fs.storage_resource.id)
+ if not filter_key and repl_session:
+ if len(repl_session) > 1:
+ if action:
+ error_msg = 'There are multiple replication sessions for the filesystem.'\
+ ' Please specify replication_name in replication_params to %s.' % action
+ self.module.fail_json(msg=error_msg)
+ return repl_session
+ return repl_session[0]
+ for session in repl_session:
+ if filter_key == 'remote_system_name' and \
+ session.remote_system.name == replication_params['remote_system_name']:
+ return session
+ if filter_key == 'name' and session.name == name:
+ return session
+ return None
+ except Exception as e:
+ errormsg = "Retrieving replication session on the filesystem failed " \
+ "with error %s", str(e)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def perform_module_operation(self):
        """
        Perform different actions on filesystem module based on parameters
        passed in the playbook
        """
        filesystem_name = self.module.params['filesystem_name']
        filesystem_id = self.module.params['filesystem_id']
        nas_server_name = self.module.params['nas_server_name']
        nas_server_id = self.module.params['nas_server_id']
        pool_name = self.module.params['pool_name']
        pool_id = self.module.params['pool_id']
        size = self.module.params['size']
        cap_unit = self.module.params['cap_unit']
        quota_config = self.module.params['quota_config']
        replication_params = self.module.params['replication_params']
        replication_state = self.module.params['replication_state']
        state = self.module.params['state']
        snap_schedule_name = self.module.params['snap_schedule_name']
        snap_schedule_id = self.module.params['snap_schedule_id']

        # result is a dictionary to contain end state and FileSystem details
        changed = False
        result = dict(
            changed=False,
            filesystem_details={}
        )

        to_modify_dict = None
        filesystem_details = None
        quota_config_obj = None

        # --- Parameter validation and defaulting -------------------------
        self.validate_input_string()

        if size is not None and size == 0:
            self.module.fail_json(msg="Size can not be 0 (Zero)")

        if size and not cap_unit:
            cap_unit = 'GB'

        if quota_config:
            # Default units when limits / grace period are supplied bare.
            if (quota_config['default_hard_limit'] is not None
                or quota_config['default_soft_limit'] is not None) and \
                    not quota_config['cap_unit']:
                quota_config['cap_unit'] = 'GB'

            if quota_config['grace_period'] is not None \
                    and quota_config['grace_period_unit'] is None:
                quota_config['grace_period_unit'] = 'days'

            if quota_config['grace_period'] is not None \
                    and quota_config['grace_period'] <= 0:
                self.module.fail_json(msg="Invalid grace_period provided. "
                                          "Must be greater than 0.")

            if quota_config['default_soft_limit'] is not None \
                    and utils.is_size_negative(quota_config['default_soft_limit']):
                self.module.fail_json(msg="Invalid default_soft_limit provided. "
                                          "Must be greater than or equal to 0.")

            if quota_config['default_hard_limit'] is not None \
                    and utils.is_size_negative(quota_config['default_hard_limit']):
                self.module.fail_json(msg="Invalid default_hard_limit provided. "
                                          "Must be greater than or equal to 0.")

        # cap_unit is only meaningful together with size.
        if (cap_unit is not None) and not size:
            self.module.fail_json(msg="cap_unit can be specified along "
                                      "with size")

        # --- Resolve referenced objects ----------------------------------
        nas_server = None
        if nas_server_name or nas_server_id:
            nas_server = self.get_nas_server(
                name=nas_server_name, id=nas_server_id)

        obj_pool = None
        if pool_name or pool_id:
            obj_pool = self.get_pool(pool_name=pool_name, pool_id=pool_id)

        obj_fs = None
        obj_fs = self.get_filesystem(name=filesystem_name,
                                     id=filesystem_id,
                                     obj_nas_server=nas_server)

        # snap_sch_id: None = not requested, "" = detach schedule,
        # otherwise the resolved schedule id to attach.
        self.snap_sch_id = None
        if snap_schedule_name or snap_schedule_id:
            snap_schedule_params = {
                "name": snap_schedule_name,
                "id": snap_schedule_id
            }
            self.snap_sch_id = self.resolve_to_snapschedule_id(snap_schedule_params)
        elif snap_schedule_name == "" or snap_schedule_id == "":
            self.snap_sch_id = ""

        if obj_fs:
            filesystem_details = obj_fs._get_properties()
            filesystem_id = obj_fs.get_id()
            to_modify_dict = self.is_modify_required(obj_fs, cap_unit)
            LOG.info("From Mod Op, to_modify_dict: %s", to_modify_dict)

        # --- Create ------------------------------------------------------
        if state == 'present' and not filesystem_details:
            if not filesystem_name:
                msg_noname = "FileSystem with id {0} is not found, unable to " \
                             "create a FileSystem without a valid " \
                             "filesystem_name".format(filesystem_id)
                self.module.fail_json(msg=msg_noname)

            if not pool_name and not pool_id:
                self.module.fail_json(msg="pool_id or pool_name is required "
                                          "to create new filesystem")
            if not size:
                self.module.fail_json(msg="Size is required to create"
                                          " a filesystem")
            size = utils.get_size_bytes(size, cap_unit)

            obj_fs = self.create_filesystem(name=filesystem_name,
                                            obj_pool=obj_pool,
                                            obj_nas_server=nas_server,
                                            size=size)

            LOG.debug("Successfully created filesystem , %s", obj_fs)
            filesystem_id = obj_fs.id
            filesystem_details = obj_fs._get_properties()
            # Re-check: remaining parameters (policies, SMB props, ...)
            # are applied as a modify right after creation.
            to_modify_dict = self.is_modify_required(obj_fs, cap_unit)
            LOG.debug("Got filesystem id , %s", filesystem_id)
            changed = True

        # --- Modify ------------------------------------------------------
        if state == 'present' and filesystem_details and to_modify_dict:
            self.modify_filesystem(update_dict=to_modify_dict, obj_fs=obj_fs)
            changed = True

        """
        Set quota configuration
        """
        if state == "present" and filesystem_details and quota_config:
            quota_config_obj = self.get_quota_config_details(obj_fs)

            if quota_config_obj is not None:
                is_quota_config_modified = self.modify_quota_config(
                    quota_config_obj=quota_config_obj,
                    quota_config_params=quota_config)

                if is_quota_config_modified:
                    changed = True
            else:
                self.module.fail_json(msg="One or more operations related"
                                          " to this task failed because the"
                                          " new object created could not be fetched."
                                          " Please rerun the task for expected result.")

        # --- Replication -------------------------------------------------
        if state == 'present' and filesystem_details and replication_state is not None:
            if replication_state == 'enable':
                changed = self.enable_replication(obj_fs, replication_params)
            else:
                changed = self.disable_replication(obj_fs, replication_params)

        # --- Delete ------------------------------------------------------
        if state == 'absent' and filesystem_details:
            changed = self.delete_filesystem(filesystem_id)
            filesystem_details = None

        # --- Display facts -----------------------------------------------
        if state == 'present' and filesystem_details:
            filesystem_details = self.get_filesystem_display_attributes(
                obj_fs=obj_fs)

        result['changed'] = changed
        result['filesystem_details'] = filesystem_details
        self.module.exit_json(**result)
+
+
def get_time_in_seconds(time, time_units):
    """Convert a duration to seconds.

    :param time: duration value; ``None`` or a non-positive value yields 0.
    :param time_units: one of ``'minutes'``, ``'hours'`` or ``'days'``; any
        other value is treated as already being seconds and returned as-is.
    :return: the duration expressed in seconds.
    """
    min_in_sec = 60
    hour_in_sec = 60 * 60
    day_in_sec = 24 * 60 * 60
    if time is None or time <= 0:
        return 0
    # Compare with == rather than the original substring test
    # (time_units in 'minutes'), which would silently match partial
    # strings such as 'min', 'in' or '' as a valid unit.
    if time_units == 'minutes':
        return time * min_in_sec
    elif time_units == 'hours':
        return time * hour_in_sec
    elif time_units == 'days':
        return time * day_in_sec
    return time
+
+
def get_time_with_unit(time):
    """Render a duration in seconds as a 'value unit' string.

    Picks the largest unit (days, then hours) that divides the value
    exactly; otherwise falls back to minutes, which may be fractional.
    """
    sec_in_min = 60
    sec_in_hour = 60 * 60
    sec_in_day = 24 * 60 * 60

    if time % sec_in_day == 0:
        value, unit = time / sec_in_day, 'days'
    elif time % sec_in_hour == 0:
        value, unit = time / sec_in_hour, 'hours'
    else:
        value, unit = time / sec_in_min, 'minutes'

    return "%s %s" % (value, unit)
+
+
def get_replication_args_list(replication_params):
    """Build the keyword-argument payload for a replication call.

    Always carries the destination pool id; adds the session name when one
    was supplied, and maps the replication mode onto max_time_out_of_sync
    (RPO minutes for asynchronous, 0 for synchronous, -1 for manual).
    """
    payload = {
        'dst_pool_id': replication_params['destination_pool_id']
    }

    session_name = replication_params['replication_name']
    if session_name:
        payload['replication_name'] = session_name

    if 'replication_mode' in replication_params:
        mode = replication_params['replication_mode']
        if mode == 'asynchronous':
            payload['max_time_out_of_sync'] = replication_params['rpo']
        elif mode == 'synchronous':
            payload['max_time_out_of_sync'] = 0
        else:
            payload['max_time_out_of_sync'] = -1

    return payload
+
+
def get_filesystem_parameters():
    """This method provide parameters required for the ansible filesystem
    module on Unity"""
    return dict(
        # Identify the filesystem by exactly one of name or id
        # (mutual exclusion is enforced by the AnsibleModule setup).
        filesystem_name=dict(required=False, type='str'),
        filesystem_id=dict(required=False, type='str'),
        # NAS server that owns the filesystem; one of name or id.
        nas_server_name=dict(required=False, type='str'),
        nas_server_id=dict(required=False, type='str'),
        description=dict(required=False, type='str'),
        # Storage pool used at creation time; one of name or id.
        pool_name=dict(required=False, type='str'),
        pool_id=dict(required=False, type='str'),
        # Size is interpreted in cap_unit (GB or TB).
        size=dict(required=False, type='int'),
        cap_unit=dict(required=False, type='str', choices=['GB', 'TB']),
        is_thin=dict(required=False, type='bool'),
        data_reduction=dict(required=False, type='bool'),
        supported_protocols=dict(required=False, type='str',
                                 choices=['NFS', 'CIFS', 'MULTIPROTOCOL']),
        # SMB/CIFS specific tunables, only meaningful for CIFS-capable
        # filesystems.
        smb_properties=dict(type='dict', options=dict(
            is_smb_sync_writes_enabled=dict(type='bool'),
            is_smb_notify_on_access_enabled=dict(type='bool'),
            is_smb_op_locks_enabled=dict(type='bool'),
            is_smb_notify_on_write_enabled=dict(type='bool'),
            smb_notify_on_change_dir_depth=dict(type='int')
        )),
        access_policy=dict(required=False, type='str',
                           choices=['NATIVE', 'UNIX', 'WINDOWS']),
        locking_policy=dict(required=False, type='str',
                            choices=['ADVISORY', 'MANDATORY']),
        tiering_policy=dict(required=False, type='str', choices=[
            'AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']),
        # Snapshot schedule to attach; one of name or id.
        snap_schedule_name=dict(required=False, type='str'),
        snap_schedule_id=dict(required=False, type='str'),
        # Quota configuration; a user-quota toggle and a quota policy
        # cannot be changed in the same task.
        quota_config=dict(required=False, type='dict', options=dict(
            grace_period=dict(required=False, type='int'),
            grace_period_unit=dict(required=False, type='str', choices=['minutes', 'hours', 'days']),
            default_hard_limit=dict(required=False, type='int'),
            default_soft_limit=dict(required=False, type='int'),
            is_user_quota_enabled=dict(required=False, type='bool'),
            quota_policy=dict(required=False, type='str', choices=['FILE_SIZE', 'BLOCKS']),
            cap_unit=dict(required=False, type='str', choices=['MB', 'GB', 'TB']),
        ), mutually_exclusive=[['is_user_quota_enabled', 'quota_policy']]),
        # Replication session settings; used together with
        # replication_state.
        replication_params=dict(type='dict', options=dict(
            replication_name=dict(type='str'),
            new_replication_name=dict(type='str'),
            replication_type=dict(type='str', choices=['local', 'remote']),
            replication_mode=dict(type='str',
                                  choices=['synchronous', 'asynchronous',
                                           'manual']),
            rpo=dict(type='int'),
            remote_system=dict(type='dict',
                               options=dict(
                                   remote_system_host=dict(type='str', required=True),
                                   remote_system_verifycert=dict(type='bool', required=False,
                                                                 default=True),
                                   remote_system_username=dict(type='str', required=True),
                                   remote_system_password=dict(type='str', required=True, no_log=True),
                                   remote_system_port=dict(type='int', required=False, default=443)
                               )),
            destination_pool_name=dict(type='str'),
            destination_pool_id=dict(type='str')
        )),
        replication_state=dict(type='str', choices=['enable', 'disable']),
        state=dict(required=True, type='str', choices=['present', 'absent'])
    )
+
+
def main():
    """ Create Unity FileSystem object and perform action on it
    based on user input from playbook"""
    # Filesystem() parses the playbook arguments and connects to Unisphere;
    # perform_module_operation() runs the requested create/modify/delete
    # flow and exits the module via exit_json/fail_json.
    obj = Filesystem()
    obj.perform_module_operation()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py b/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py
new file mode 100644
index 000000000..35e536a47
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py
@@ -0,0 +1,769 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+""" Ansible module for managing Filesystem Snapshots on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: filesystem_snapshot
+short_description: Manage filesystem snapshot on the Unity storage system
+description:
+- Managing Filesystem Snapshot on the Unity storage system includes
+ create filesystem snapshot, get filesystem snapshot, modify filesystem
+ snapshot and delete filesystem snapshot.
+version_added: '1.1.0'
+extends_documentation_fragment:
+ - dellemc.unity.unity
+author:
+- Rajshree Khare (@kharer5) <ansible.team@dell.com>
+options:
+ snapshot_name:
+ description:
+ - The name of the filesystem snapshot.
+ - Mandatory parameter for creating a filesystem snapshot.
+ - For all other operations either I(snapshot_name) or I(snapshot_id)
+ is required.
+ type: str
+ snapshot_id:
+ description:
+ - During creation snapshot_id is auto generated.
+ - For all other operations either I(snapshot_id) or I(snapshot_name)
+ is required.
+ type: str
+ filesystem_name:
+ description:
+ - The name of the Filesystem for which snapshot is created.
+ - For creation of filesystem snapshot either I(filesystem_name) or
+ I(filesystem_id) is required.
+ - Not required for other operations.
+ type: str
+ filesystem_id:
+ description:
+ - The ID of the Filesystem for which snapshot is created.
+ - For creation of filesystem snapshot either I(filesystem_id) or
+ I(filesystem_name) is required.
+ - Not required for other operations.
+ type: str
+ nas_server_name:
+ description:
+ - The name of the NAS server in which the Filesystem is created.
+ - For creation of filesystem snapshot either I(nas_server_name) or
+ I(nas_server_id) is required.
+ - Not required for other operations.
+ type: str
+ nas_server_id:
+ description:
+ - The ID of the NAS server in which the Filesystem is created.
+    - For creation of filesystem snapshot either I(nas_server_id) or
+      I(nas_server_name) is required.
+ - Not required for other operations.
+ type: str
+ auto_delete:
+ description:
+ - This option specifies whether or not the filesystem snapshot will be
+ automatically deleted.
+ - If set to C(true), the filesystem snapshot will expire based on the pool
+ auto deletion policy.
+ - If set to C(false), the filesystem snapshot will not be auto deleted
+ based on the pool auto deletion policy.
+ - Option I(auto_delete) can not be set to C(true), if I(expiry_time) is specified.
+ - If during creation neither I(auto_delete) nor I(expiry_time) is mentioned
+ then the filesystem snapshot will be created keeping I(auto_delete) as
+ C(true).
+ - Once the I(expiry_time) is set, then the filesystem snapshot cannot be
+ assigned to the auto delete policy.
+ type: bool
+ expiry_time:
+ description:
+ - This option is for specifying the date and time after which the
+ filesystem snapshot will expire.
+ - The time is to be mentioned in UTC timezone.
+ - The format is "MM/DD/YYYY HH:MM". Year must be in 4 digits.
+ type: str
+ description:
+ description:
+ - The additional information about the filesystem snapshot can be
+ provided using this option.
+ - The description can be removed by passing an empty string.
+ type: str
+ fs_access_type:
+ description:
+ - Access type of the filesystem snapshot.
+ - Required only during creation of filesystem snapshot.
+ - If not given, snapshot's access type will be C(Checkpoint).
+ type: str
+ choices: ['Checkpoint' , 'Protocol']
+ state:
+ description:
+ - The state option is used to mention the existence of the filesystem
+ snapshot.
+ type: str
+ required: true
+ choices: ['absent', 'present']
+notes:
+ - Filesystem snapshot cannot be deleted, if it has nfs or smb share.
+ - The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+ - name: Create Filesystem Snapshot
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ filesystem_name: "ansible_test_FS"
+ nas_server_name: "lglad069"
+ description: "Created using playbook"
+ auto_delete: True
+ fs_access_type: "Protocol"
+ state: "present"
+
+ - name: Create Filesystem Snapshot with expiry time
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap_1"
+ filesystem_name: "ansible_test_FS_1"
+ nas_server_name: "lglad069"
+ description: "Created using playbook"
+ expiry_time: "04/15/2021 2:30"
+ fs_access_type: "Protocol"
+ state: "present"
+
+ - name: Get Filesystem Snapshot Details using Name
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ state: "present"
+
+ - name: Get Filesystem Snapshot Details using ID
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_id: "10008000403"
+ state: "present"
+
+ - name: Update Filesystem Snapshot attributes
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ description: "Description updated"
+ auto_delete: False
+ expiry_time: "04/15/2021 5:30"
+ state: "present"
+
+ - name: Update Filesystem Snapshot attributes using ID
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_id: "10008000403"
+ expiry_time: "04/18/2021 8:30"
+ state: "present"
+
+ - name: Delete Filesystem Snapshot using Name
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ state: "absent"
+
+ - name: Delete Filesystem Snapshot using ID
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_id: "10008000403"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+
+filesystem_snapshot_details:
+ description: Details of the filesystem snapshot.
+ returned: When filesystem snapshot exists
+ type: dict
+ contains:
+ access_type:
+ description: Access type of filesystem snapshot.
+ type: str
+ attached_wwn:
+ description: Attached WWN details.
+ type: str
+ creation_time:
+ description: Creation time of filesystem snapshot.
+ type: str
+ creator_schedule:
+ description: Creator schedule of filesystem snapshot.
+ type: str
+ creator_type:
+ description: Creator type for filesystem snapshot.
+ type: str
+ creator_user:
+ description: Creator user for filesystem snapshot.
+ type: str
+ description:
+ description: Description of the filesystem snapshot.
+ type: str
+ expiration_time:
+ description: Date and time after which the filesystem snapshot
+ will expire.
+ type: str
+ is_auto_delete:
+      description: Indicates whether the filesystem snapshot is auto deleted or not.
+ type: bool
+ id:
+ description: Unique identifier of the filesystem snapshot
+ instance.
+ type: str
+ name:
+ description: The name of the filesystem snapshot.
+ type: str
+ size:
+ description: Size of the filesystem snapshot.
+ type: int
+ filesystem_name:
+ description: Name of the filesystem for which the snapshot exists.
+ type: str
+ filesystem_id:
+ description: Id of the filesystem for which the snapshot exists.
+ type: str
+ nas_server_name:
+ description: Name of the NAS server on which filesystem exists.
+ type: str
+ nas_server_id:
+ description: Id of the NAS server on which filesystem exists.
+ type: str
+ sample: {
+ "access_type": "FilesystemSnapAccessTypeEnum.CHECKPOINT",
+ "attached_wwn": null,
+ "creation_time": "2022-10-21 04:42:53.951000+00:00",
+ "creator_schedule": null,
+ "creator_type": "SnapCreatorTypeEnum.USER_CUSTOM",
+ "creator_user": {
+ "id": "user_admin"
+ },
+ "description": "Created using playbook",
+ "existed": true,
+ "expiration_time": null,
+ "filesystem_id": "fs_137",
+ "filesystem_name": "test",
+ "hash": 8739894572587,
+ "host_access": null,
+ "id": "171798721695",
+ "io_limit_policy": null,
+ "is_auto_delete": true,
+ "is_modifiable": false,
+ "is_modified": false,
+ "is_read_only": true,
+ "is_system_snap": false,
+ "last_writable_time": null,
+ "lun": null,
+ "name": "test_FS_snap_1",
+ "nas_server_id": "nas_1",
+ "nas_server_name": "lglad072",
+ "parent_snap": null,
+ "size": 107374182400,
+ "snap_group": null,
+ "state": "SnapStateEnum.READY"
+ }
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+from datetime import datetime
+
+LOG = utils.get_logger('filesystem_snapshot')
+
+application_type = "Ansible/1.6.0"
+
+
class FilesystemSnapshot(object):
    """Class with Filesystem Snapshot operations"""

    def __init__(self):
        """ Define all parameters required by this module"""

        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_snapshot_parameters())

        # Conflicting name/id pairs cannot be supplied together, and at
        # least one snapshot identifier must be present.
        mutually_exclusive = [['snapshot_name', 'snapshot_id'],
                              ['filesystem_name', 'filesystem_id'],
                              ['nas_server_name', 'nas_server_id']]

        required_one_of = [['snapshot_name', 'snapshot_id']]
        # initialize the ansible module
        self.module = AnsibleModule(argument_spec=self.module_params,
                                    supports_check_mode=False,
                                    mutually_exclusive=mutually_exclusive,
                                    required_one_of=required_one_of)
        utils.ensure_required_libs(self.module)

        # result is a dictionary that contains changed status and
        # filesystem snapshot details
        self.result = {"changed": False,
                       'filesystem_snapshot_details': {}}

        self.unity_conn = utils.get_unity_unisphere_connection(
            self.module.params, application_type)
        self.snap_obj = utils.snap.UnitySnap(self.unity_conn)
        LOG.info('Connection established with the Unity Array')

    def validate_expiry_time(self, expiry_time):
        """Fail the module run if expiry_time is not in
        'MM/DD/YYYY HH:MM' format."""
        try:
            datetime.strptime(expiry_time, '%m/%d/%Y %H:%M')
        except ValueError:
            error_msg = ("expiry_time: %s, not in MM/DD/YYYY HH:MM format." %
                         expiry_time)
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def to_update(self, fs_snapshot, description=None, auto_del=None,
                  expiry_time=None, fs_access_type=None):
        """Determines whether to update the snapshot or not.

        Returns a dict of the attributes that differ from the current
        snapshot state (empty dict means no modification is needed).
        Fails the module when the requested combination is invalid.
        """
        snap_modify_dict = dict()

        # Access type is fixed at creation; any attempt to change it is
        # rejected outright.
        if fs_access_type and fs_access_type != fs_snapshot.access_type:
            error_message = "Modification of access type is not allowed."
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)

        # If the snapshot has is_auto_delete True,
        # Check if auto_delete in the input is either None or True
        if expiry_time and fs_snapshot.is_auto_delete \
                and (auto_del is None or auto_del):
            self.module.fail_json(msg="expiry_time can be assigned when"
                                      " auto delete is False.")
        if auto_del is not None:
            if fs_snapshot.expiration_time:
                error_msg = "expiry_time for filesystem snapshot is set." \
                            " Once it is set then snapshot cannot" \
                            " be assigned to auto_delete policy."
                self.module.fail_json(msg=error_msg)
            if auto_del != fs_snapshot.is_auto_delete:
                snap_modify_dict['is_auto_delete'] = auto_del

        if description is not None and description != fs_snapshot.description:
            snap_modify_dict['description'] = description

        if to_update_expiry_time(fs_snapshot, expiry_time):
            snap_modify_dict['expiry_time'] = expiry_time
        LOG.info("Snapshot modification details: %s", snap_modify_dict)
        return snap_modify_dict

    def update_filesystem_snapshot(self, fs_snapshot, snap_modify_dict):
        """Apply the changes collected by to_update() to the snapshot.

        Translates expiry_time into a retention duration relative to the
        array's current system time before calling modify().
        """
        try:
            duration = None
            if 'expiry_time' in snap_modify_dict \
                    and snap_modify_dict['expiry_time']:
                duration = convert_timestamp_to_sec(
                    snap_modify_dict['expiry_time'],
                    self.unity_conn.system_time)
                # NOTE(review): duration == 0 is falsy, so an expiry equal
                # to the current system time slips past this check —
                # confirm whether that is intended.
                if duration and duration <= 0:
                    self.module.fail_json(msg="expiry_time should be after"
                                              " the current system time.")
            if 'is_auto_delete' in snap_modify_dict \
                    and snap_modify_dict['is_auto_delete'] is not None:
                auto_delete = snap_modify_dict['is_auto_delete']
            else:
                auto_delete = None
            # An empty-string description is deliberately passed through so
            # the user can clear an existing description.
            if 'description' in snap_modify_dict \
                    and (snap_modify_dict['description']
                         or len(snap_modify_dict['description']) == 0):
                description = snap_modify_dict['description']
            else:
                description = None

            fs_snapshot.modify(retentionDuration=duration,
                               isAutoDelete=auto_delete,
                               description=description)
            fs_snapshot.update()
        except Exception as e:
            error_msg = "Failed to modify filesystem snapshot" \
                        " [name: %s , id: %s] with error %s."\
                        % (fs_snapshot.name, fs_snapshot.id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def create_filesystem_snapshot(self, snap_name, storage_id,
                                   description=None, auto_del=None,
                                   expiry_time=None, fs_access_type=None):
        """Create a snapshot of the given storage resource and return the
        new snapshot object; fails the module on any error."""
        try:
            duration = None
            if expiry_time:
                duration = convert_timestamp_to_sec(
                    expiry_time, self.unity_conn.system_time)
                if duration <= 0:
                    self.module.fail_json(msg="expiry_time should be after"
                                              " the current system time.")

            fs_snapshot = self.snap_obj.create(
                cli=self.unity_conn._cli, storage_resource=storage_id,
                name=snap_name, description=description,
                is_auto_delete=auto_del, retention_duration=duration,
                fs_access_type=fs_access_type)
            return fs_snapshot
        except Exception as e:
            error_msg = "Failed to create filesystem snapshot" \
                        " %s with error %s" % (snap_name, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def is_snap_has_share(self, fs_snap):
        """Return True if any NFS or CIFS share is carved out of the
        snapshot; such snapshots must not be deleted."""
        try:
            obj = self.unity_conn.get_nfs_share(snap=fs_snap) or \
                self.unity_conn.get_cifs_share(snap=fs_snap)
            if len(obj) > 0:
                LOG.info("Snapshot has %s nfs/smb share/s", len(obj))
                return True
        except Exception as e:
            msg = "Failed to get nfs/smb share from filesystem snapshot. " \
                  "error: %s" % str(e)
            LOG.error(msg)
            self.module.fail_json(msg=msg)
        return False

    def delete_fs_snapshot(self, fs_snapshot):
        """Delete the snapshot after confirming no share depends on it.

        Returns None so the caller can clear its snapshot reference.
        """
        try:
            # Checking whether nfs/smb share created from fs_snapshot
            if self.is_snap_has_share(fs_snapshot):
                msg = "Filesystem snapshot cannot be deleted because it has " \
                      "nfs/smb share"
                LOG.error(msg)
                self.module.fail_json(msg=msg)
            fs_snapshot.delete()
            return None

        except Exception as e:
            error_msg = "Failed to delete filesystem snapshot" \
                        " [name: %s, id: %s] with error %s." \
                        % (fs_snapshot.name, fs_snapshot.id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def get_fs_snapshot_obj(self, name=None, id=None):
        """Look up a snapshot by name or id.

        Returns the snapshot object, or None when it does not exist;
        fails the module on authentication or unexpected errors.
        """
        fs_snapshot = id if id else name
        msg = "Failed to get details of filesystem snapshot %s with error %s."
        try:
            fs_snap_obj = self.unity_conn.get_snap(name=name, _id=id)
            if fs_snap_obj and fs_snap_obj.existed:
                LOG.info("Successfully got the filesystem snapshot object "
                         "%s.", fs_snap_obj)
            else:
                fs_snap_obj = None
            return fs_snap_obj

        except utils.HttpError as e:
            if e.http_status == 401:
                cred_err = ("Incorrect username or password , %s" % e.message)
                self.module.fail_json(msg=cred_err)
            else:
                err_msg = msg % (fs_snapshot, str(e))
                LOG.error(err_msg)
                self.module.fail_json(msg=err_msg)

        except utils.UnityResourceNotFoundError as e:
            # A missing snapshot is not an error here: returning None lets
            # the caller fall into the create path.
            err_msg = msg % (fs_snapshot, str(e))
            LOG.error(err_msg)
            return None

        except Exception as e:
            err_msg = msg % (fs_snapshot, str(e))
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)

    def get_filesystem_obj(self, nas_server=None, name=None, id=None):
        """Look up a filesystem by name (NAS server required) or by id.

        NOTE(review): the name path returns a single object while the id
        path may return a list-like result — callers index [0] for the id
        case; confirm against the storops API.
        """
        filesystem = id if id else name
        try:
            obj_fs = None
            if name:
                if not nas_server:
                    err_msg = "NAS Server is required to get the FileSystem."
                    LOG.error(err_msg)
                    self.module.fail_json(msg=err_msg)
                obj_fs = self.unity_conn.get_filesystem(name=name,
                                                        nas_server=nas_server)
                if obj_fs and obj_fs.existed:
                    LOG.info("Successfully got the filesystem object %s.",
                             obj_fs)
                    return obj_fs
            if id:
                if nas_server:
                    obj_fs = self.unity_conn\
                        .get_filesystem(id=id, nas_server=nas_server)
                else:
                    obj_fs = self.unity_conn.get_filesystem(id=id)
                if obj_fs and obj_fs.existed:
                    LOG.info("Successfully got the filesystem object %s.",
                             obj_fs)
                    return obj_fs
        except Exception as e:
            error_msg = "Failed to get filesystem %s with error %s."\
                        % (filesystem, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def get_nas_server_obj(self, name=None, id=None):
        """Look up a NAS server by name or id; fails the module when it
        does not exist or the lookup errors."""
        nas_server = id if id else name
        error_msg = ("Failed to get NAS server %s." % nas_server)
        try:
            obj_nas = self.unity_conn.get_nas_server(_id=id, name=name)
            if (name and obj_nas.existed) or (id and obj_nas.existed):
                LOG.info("Successfully got the NAS server object %s.",
                         obj_nas)
                return obj_nas
            else:
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)
        except Exception as e:
            error_msg = "Failed to get NAS server %s with error %s."\
                        % (nas_server, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def create_fs_snapshot_details_dict(self, fs_snapshot):
        """ Add name and id of storage resource to filesystem snapshot
        details """

        snapshot_dict = fs_snapshot._get_properties()
        # Replace the raw storage_resource object with flat
        # name/id/NAS-server fields that are friendlier in module output.
        del snapshot_dict['storage_resource']

        snapshot_dict['filesystem_name'] = fs_snapshot.storage_resource.name
        snapshot_dict['filesystem_id'] = fs_snapshot.storage_resource.filesystem.id

        obj_fs = self.unity_conn.\
            get_filesystem(id=fs_snapshot.storage_resource.filesystem.id)
        if obj_fs and obj_fs.existed:
            snapshot_dict['nas_server_name'] = obj_fs.nas_server[0].name
            snapshot_dict['nas_server_id'] = obj_fs.nas_server[0].id

        return snapshot_dict

    def perform_module_operation(self):
        """
        Perform different actions on snapshot module based on parameters
        chosen in playbook
        """
        snapshot_name = self.module.params['snapshot_name']
        snapshot_id = self.module.params['snapshot_id']
        filesystem_name = self.module.params['filesystem_name']
        filesystem_id = self.module.params['filesystem_id']
        nas_server_name = self.module.params['nas_server_name']
        nas_server_id = self.module.params['nas_server_id']
        auto_delete = self.module.params['auto_delete']
        expiry_time = self.module.params['expiry_time']
        description = self.module.params['description']
        fs_access_type = self.module.params['fs_access_type']
        state = self.module.params['state']
        nas_server_resource = None
        filesystem_resource = None
        changed = False

        LOG.info("Getting Filesystem Snapshot details.")
        fs_snapshot = self.get_fs_snapshot_obj(name=snapshot_name,
                                               id=snapshot_id)

        msg = "Filesystem Snapshot details: %s." % str(fs_snapshot)
        LOG.info(msg)

        # Get NAS server Object
        if nas_server_name is not None:
            if nas_server_name == "" or nas_server_name.isspace():
                self.module.fail_json(msg="Invalid nas_server_name given,"
                                          " Please provide a valid name.")
            nas_server_resource = self\
                .get_nas_server_obj(name=nas_server_name)
        elif nas_server_id is not None:
            if nas_server_id == "" or nas_server_id.isspace():
                self.module.fail_json(msg="Invalid nas_server_id given,"
                                          " Please provide a valid ID.")
            nas_server_resource = self.get_nas_server_obj(id=nas_server_id)

        # Get Filesystem Object
        # fs_res_id is only bound when a filesystem was specified; later
        # uses are guarded by filesystem_resource being set.
        if filesystem_name is not None:
            if filesystem_name == "" or filesystem_name.isspace():
                self.module.fail_json(msg="Invalid filesystem_name given,"
                                          " Please provide a valid name.")
            filesystem_resource = self\
                .get_filesystem_obj(nas_server=nas_server_resource,
                                    name=filesystem_name)
            fs_res_id = filesystem_resource.storage_resource.id
        elif filesystem_id is not None:
            if filesystem_id == "" or filesystem_id.isspace():
                self.module.fail_json(msg="Invalid filesystem_id given,"
                                          " Please provide a valid ID.")
            filesystem_resource = self\
                .get_filesystem_obj(id=filesystem_id)
            fs_res_id = filesystem_resource[0].storage_resource.id

        # Check for error, if user tries to create a filesystem snapshot
        # with the same name.
        # NOTE(review): when looked up by filesystem_id the resource was
        # indexed with [0] above, so filesystem_resource here is list-like
        # and .storage_resource.name in the message may not resolve —
        # confirm.
        if fs_snapshot and filesystem_resource and \
                (fs_snapshot.storage_resource.id
                 != fs_res_id):
            self.module.fail_json(
                msg="Snapshot %s is of %s storage resource. Cannot create new"
                    " snapshot with same name for %s storage resource."
                    % (fs_snapshot.name, fs_snapshot.storage_resource.name,
                       filesystem_resource.storage_resource.name))

        # check for valid expiry_time
        if expiry_time is not None and \
                (expiry_time == "" or expiry_time.isspace()):
            self.module.fail_json(msg="Please provide valid expiry_time,"
                                      " empty expiry_time given.")
        if expiry_time:
            self.validate_expiry_time(expiry_time)

        # Check if in input auto_delete is True and expiry_time is not None
        if expiry_time and auto_delete:
            error_msg = "Cannot set expiry_time if auto_delete given as True."
            LOG.info(error_msg)
            self.module.fail_json(msg=error_msg)

        # check for fs_access_type
        if fs_access_type is not None:
            if (fs_access_type == "" or fs_access_type.isspace()):
                self.module.fail_json(msg="Please provide valid "
                                          "fs_access_type, empty "
                                          "fs_access_type given.")
            # Translate the user-facing string to the storops enum.
            if fs_access_type == "Checkpoint":
                fs_access_type = utils.FilesystemSnapAccessTypeEnum.CHECKPOINT
            elif fs_access_type == "Protocol":
                fs_access_type = utils.FilesystemSnapAccessTypeEnum.PROTOCOL

        # Check whether to modify the filesystem snapshot or not
        fs_snap_modify_dict = dict()
        if state == 'present' and fs_snapshot:
            fs_snap_modify_dict = self\
                .to_update(fs_snapshot, description=description,
                           auto_del=auto_delete, expiry_time=expiry_time,
                           fs_access_type=fs_access_type)

        # Create Filesystem Snapshot
        if not fs_snapshot and state == "present":
            LOG.info("Creating the filesystem snapshot.")

            if snapshot_id:
                self.module.fail_json(msg="Creation of Filesystem Snapshot is"
                                          " allowed using snapshot_name only,"
                                          " snapshot_id given.")
            if snapshot_name == "" or snapshot_name.isspace():
                self.module.fail_json(msg="snapshot_name is required for"
                                          " creation of the filesystem"
                                          " snapshot, empty snapshot_name"
                                          " given.")
            if not filesystem_resource:
                self.module.fail_json(msg="filesystem_name or filesystem_id"
                                          " required to create a snapshot.")

            fs_snapshot = self.create_filesystem_snapshot(
                snapshot_name,
                fs_res_id,
                description,
                auto_delete,
                expiry_time,
                fs_access_type)
            changed = True

        # Update the Snapshot
        if fs_snapshot and state == "present" and fs_snap_modify_dict:
            LOG.info("Updating the Filesystem Snapshot.")
            self.update_filesystem_snapshot(fs_snapshot, fs_snap_modify_dict)
            changed = True

        # Delete the Filesystem Snapshot
        if state == "absent" and fs_snapshot:
            fs_snapshot = self.delete_fs_snapshot(fs_snapshot)
            changed = True

        # Add filesystem snapshot details to the result.
        if fs_snapshot:
            fs_snapshot.update()
            self.result["filesystem_snapshot_details"] = \
                self.create_fs_snapshot_details_dict(fs_snapshot)
        else:
            self.result["filesystem_snapshot_details"] = {}

        self.result["changed"] = changed
        self.module.exit_json(**self.result)
+
+
def to_update_expiry_time(fs_snapshot, expiry_time=None):
    """Return True when the snapshot's expiration time needs updating.

    No update is needed when no expiry_time was requested. An update is
    needed when the snapshot has no expiration set yet, or when the
    requested time differs from the currently configured one.
    """
    if not expiry_time:
        return False
    current_expiration = fs_snapshot.expiration_time
    if current_expiration is None:
        return True
    return convert_timestamp_to_sec(expiry_time, current_expiration) != 0
+
+
def convert_timestamp_to_sec(expiry_time, snap_time):
    """Return the signed difference in whole seconds between expiry_time
    (an 'MM/DD/YYYY HH:MM' string) and snap_time (a datetime), with
    snap_time truncated to minute precision first."""
    fmt = '%m/%d/%Y %H:%M'
    reference = datetime.strptime(snap_time.strftime(fmt), fmt)
    target = datetime.strptime(expiry_time, fmt)
    return int((target - reference).total_seconds())
+
+
def get_snapshot_parameters():
    """Return the argument spec for the Unity filesystem snapshot module."""
    return {
        'snapshot_name': {'required': False, 'type': 'str'},
        'snapshot_id': {'required': False, 'type': 'str'},
        'filesystem_name': {'required': False, 'type': 'str'},
        'filesystem_id': {'required': False, 'type': 'str'},
        'nas_server_name': {'required': False, 'type': 'str'},
        'nas_server_id': {'required': False, 'type': 'str'},
        'auto_delete': {'required': False, 'type': 'bool'},
        'expiry_time': {'required': False, 'type': 'str'},
        'description': {'required': False, 'type': 'str'},
        'fs_access_type': {'required': False, 'type': 'str',
                           'choices': ['Checkpoint', 'Protocol']},
        'state': {'required': True, 'type': 'str',
                  'choices': ['present', 'absent']},
    }
+
+
def main():
    """ Create Unity Filesystem Snapshot object and perform actions on it
    based on user input from playbook"""
    # FilesystemSnapshot() parses the playbook arguments and connects to
    # Unisphere; perform_module_operation() runs the requested
    # create/modify/delete flow and exits via exit_json/fail_json.
    obj = FilesystemSnapshot()
    obj.perform_module_operation()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/host.py b/ansible_collections/dellemc/unity/plugins/modules/host.py
new file mode 100644
index 000000000..21a5fbae1
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/host.py
@@ -0,0 +1,1026 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing host on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: host
+
+version_added: '1.1.0'
+
+short_description: Manage Host operations on Unity
+
+description:
+- The Host module contains the operations
+ Creation of a Host,
+ Addition of initiators to Host,
+ Removal of initiators from Host,
+ Modification of host attributes,
+ Get details of a Host,
+ Deletion of a Host,
+ Addition of network address to Host,
+ Removal of network address from Host.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Rajshree Khare (@kharer5) <ansible.team@dell.com>
+
+options:
+ host_name:
+ description:
+ - Name of the host.
+ - Mandatory for host creation.
+ type: str
+
+ host_id:
+ description:
+ - Unique identifier of the host.
+ - Host Id is auto generated during creation.
+    - Except create, all other operations require either I(host_id) or I(host_name).
+ type: str
+
+ description:
+ description:
+ - Host description.
+ type: str
+
+ host_os:
+ description:
+ - Operating system running on the host.
+ choices: ['AIX', 'Citrix XenServer', 'HP-UX', 'IBM VIOS', 'Linux',
+ 'Mac OS', 'Solaris', 'VMware ESXi', 'Windows Client', 'Windows Server']
+ type: str
+
+ new_host_name:
+ description:
+ - New name for the host.
+ - Only required in rename host operation.
+ type: str
+
+ initiators:
+ description:
+ - List of initiators to be added/removed to/from host.
+ type: list
+ elements: str
+
+ initiator_state:
+ description:
+ - State of the initiator.
+ choices: [present-in-host , absent-in-host]
+ type: str
+
+ network_address:
+ description:
+ - Network address to be added/removed to/from the host.
+ - Enter valid IPV4 or host name.
+ type: str
+
+ network_address_state:
+ description:
+ - State of the Network address.
+ choices: [present-in-host , absent-in-host]
+ type: str
+
+ state:
+ description:
+ - State of the host.
+ choices: [present , absent]
+ type: str
+ required: true
+
+notes:
+ - The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+- name: Create empty Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host"
+ host_os: "Linux"
+ description: "ansible-test-host"
+ state: "present"
+
+- name: Create Host with Initiators
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-1"
+ host_os: "Linux"
+ description: "ansible-test-host-1"
+ initiators:
+ - "iqn.1994-05.com.redhat:c38e6e8cfd81"
+ - "20:00:00:90:FA:13:81:8D:10:00:00:90:FA:13:81:8D"
+ initiator_state: "present-in-host"
+ state: "present"
+
+- name: Modify Host using host_id
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_id: "Host_253"
+ new_host_name: "ansible-test-host-2"
+ host_os: "Mac OS"
+      description: "Ansible testing purpose"
+ state: "present"
+
+- name: Add Initiators to Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-2"
+ initiators:
+ - "20:00:00:90:FA:13:81:8C:10:00:00:90:FA:13:81:8C"
+ initiator_state: "present-in-host"
+ state: "present"
+
+- name: Get Host details using host_name
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-2"
+ state: "present"
+
+- name: Get Host details using host_id
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_id: "Host_253"
+ state: "present"
+
+- name: Delete Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-2"
+ state: "absent"
+
+- name: Add network address to Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "{{host_name}}"
+ network_address: "192.168.1.2"
+ network_address_state: "present-in-host"
+ state: "present"
+
+- name: Delete network address from Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "{{host_name}}"
+ network_address: "192.168.1.2"
+ network_address_state: "absent-in-host"
+ state: "present"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+
+host_details:
+ description: Details of the host.
+ returned: When host exists.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the host.
+ type: str
+ name:
+ description: The name of the host.
+ type: str
+ description:
+ description: Description about the host.
+ type: str
+ fc_host_initiators:
+ description: Details of the FC initiators associated with
+ the host.
+ type: list
+ contains:
+ id:
+ description: Unique identifier of the FC initiator path.
+ type: str
+ name:
+ description: FC Qualified Name (WWN) of the initiator.
+ type: str
+ paths:
+ description: Details of the paths associated with the FC initiator.
+ type: list
+ contains:
+ id:
+ description: Unique identifier of the path.
+ type: str
+ is_logged_in:
+ description: Indicates whether the host initiator is logged into the storage system.
+ type: bool
+ iscsi_host_initiators:
+ description: Details of the ISCSI initiators associated
+ with the host.
+ type: list
+ contains:
+ id:
+ description: Unique identifier of the ISCSI initiator path.
+ type: str
+ name:
+ description: ISCSI Qualified Name (IQN) of the initiator.
+ type: str
+ paths:
+ description: Details of the paths associated with the ISCSI initiator.
+ type: list
+ contains:
+ id:
+ description: Unique identifier of the path.
+ type: str
+ is_logged_in:
+ description: Indicates whether the host initiator is logged into the storage system.
+ type: bool
+ network_addresses:
+ description: List of network addresses mapped to the host.
+ type: list
+ os_type:
+ description: Operating system running on the host.
+ type: str
+ type:
+ description: HostTypeEnum of the host.
+ type: str
+ host_luns:
+ description: Details of luns attached to host.
+ type: list
+ sample: {
+ "auto_manage_type": "HostManageEnum.UNKNOWN",
+ "datastores": null,
+ "description": "ansible-test-host-1",
+ "existed": true,
+ "fc_host_initiators": [
+ {
+ "id": "HostInitiator_1",
+ "name": "HostName_1",
+ "paths": [
+ {
+ "id": "HostInitiator_1_Id1",
+ "is_logged_in": true
+ },
+ {
+ "id": "HostInitiator_1_Id2",
+ "is_logged_in": true
+ }
+ ]
+ }
+ ],
+ "hash": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
+ "health": {
+ "UnityHealth": {
+ "hash": 8764429420954
+ }
+ },
+ "host_container": null,
+ "host_luns": [],
+ "host_polled_uuid": null,
+ "host_pushed_uuid": null,
+ "host_uuid": null,
+ "host_v_vol_datastore": null,
+ "id": "Host_2198",
+ "iscsi_host_initiators": [
+ {
+ "id": "HostInitiator_2",
+ "name": "HostName_2",
+ "paths": [
+ {
+ "id": "HostInitiator_2_Id1",
+ "is_logged_in": true
+ },
+ {
+ "id": "HostInitiator_2_Id2",
+ "is_logged_in": true
+ }
+ ]
+ }
+ ],
+ "last_poll_time": null,
+ "name": "ansible-test-host-1",
+ "network_addresses": [],
+ "os_type": "Linux",
+ "registration_type": null,
+ "storage_resources": null,
+ "tenant": null,
+ "type": "HostTypeEnum.HOST_MANUAL",
+ "vms": null
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+import ipaddress
+
+LOG = utils.get_logger('host')
+
+application_type = "Ansible/1.6.0"
+
+
class Host(object):
    """Class with Host operations"""

    def __init__(self):
        """ Define all parameters required by this module"""

        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_host_parameters())

        mutually_exclusive = [['host_name', 'host_id']]
        required_one_of = [['host_name', 'host_id']]
        required_together = [['network_address', 'network_address_state']]

        # initialize the ansible module
        self.module = AnsibleModule(argument_spec=self.module_params,
                                    supports_check_mode=False,
                                    mutually_exclusive=mutually_exclusive,
                                    required_together=required_together,
                                    required_one_of=required_one_of)
        utils.ensure_required_libs(self.module)

        self.unity = utils.get_unity_unisphere_connection(self.module.params, application_type)
        LOG.info('Got the unity instance for provisioning on Unity')

    def get_host_count(self, host_name):
        """ To get the count of hosts with same host_name """

        hosts = utils.host.UnityHostList.get(cli=self.unity._cli,
                                             name=host_name)
        return len(hosts)

    def get_host_details(self, host_id=None, host_name=None):
        """ Get details of a given host.

        Returns None when the host does not exist; fails the module on
        duplicate names or unexpected errors.
        """

        host_id_or_name = host_id if host_id else host_name
        try:
            LOG.info("Getting host %s details", host_id_or_name)
            if host_id:
                host_details = self.unity.get_host(_id=host_id)
                if host_details.name is None:
                    return None
            if host_name:

                ''' get the count of hosts with same host_name '''
                host_count = self.get_host_count(host_name)

                if host_count < 1:
                    return None
                elif host_count > 1:
                    # NOTE: host_count is an int; it must be converted to
                    # str before concatenation, otherwise this error path
                    # itself raises a TypeError.
                    error_message = "Duplicate hosts found: There are "\
                                    + str(host_count) + " host(s) with the same" \
                                    " host_name: " + host_name
                    LOG.error(error_message)
                    self.module.fail_json(msg=error_message)
                else:
                    host_details = self.unity.get_host(name=host_name)

            return host_details
        except utils.HttpError as e:
            if e.http_status == 401:
                msg = 'Incorrect username or password provided.'
                LOG.error(msg)
                self.module.fail_json(msg=msg)
            else:
                msg = "Got HTTP Connection Error while getting host " \
                      "details %s : Error %s " % (host_id_or_name, str(e))
                LOG.error(msg)
                self.module.fail_json(msg=msg)
        except utils.UnityResourceNotFoundError as e:
            error_message = "Failed to get details of host " \
                            "{0} with error {1}".format(host_id_or_name,
                                                        str(e))
            LOG.error(error_message)
            return None
        except Exception as e:
            error_message = "Got error %s while getting details of host %s" \
                            % (str(e), host_id_or_name)
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)

    def create_host(self, host_name):
        """ Create a new host, optionally registering initiators """
        try:
            description = self.module.params['description']
            host_os = self.module.params['host_os']
            host_type = utils.HostTypeEnum.HOST_MANUAL
            initiators = self.module.params['initiators']
            initiator_state = self.module.params['initiator_state']
            empty_initiators_flag = False

            if (initiators and initiator_state == 'absent-in-host'):
                error_message = "Incorrect 'initiator_state' given."
                LOG.error(error_message)
                self.module.fail_json(msg=error_message)

            if (initiators is None or len(initiators) == 0
                    or not initiator_state
                    or initiator_state == 'absent-in-host'):
                empty_initiators_flag = True

            # if any of the initiators is invalid or already mapped
            if (initiators and initiator_state == 'present-in-host'):
                unmapped_initiators \
                    = self.get_list_unmapped_initiators(initiators)
                if unmapped_initiators is None \
                        or len(unmapped_initiators) < len(initiators):
                    error_message = "Provide valid initiators."
                    LOG.error(error_message)
                    self.module.fail_json(msg=error_message)
            if not empty_initiators_flag:
                self.validate_initiators(initiators)
            LOG.info("Creating empty host %s ", host_name)
            new_host = utils.host.UnityHost.create(self.unity._cli, name=host_name, desc=description,
                                                   os=host_os, host_type=host_type)
            if not empty_initiators_flag:
                host_details = self.unity.get_host(name=host_name)
                LOG.info("Adding initiators to %s host", host_name)
                result, new_host \
                    = self.add_initiator_to_host(host_details, initiators)
            return True, new_host
        except Exception as e:
            error_message = "Got error %s while creation of host %s" \
                            % (str(e), host_name)
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)

    def validate_initiators(self, initiators):
        """ Fail the module if any initiator string is not a valid WWN/IQN """
        results = []
        for item in initiators:
            results.append(utils.is_initiator_valid(item))
        if False in results:
            error_message = "One or more initiator provided is not valid, please provide valid initiators"
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)

    def get_host_initiators_list(self, host_details):
        """ Get the list of existing initiators in host"""

        existing_initiators = []
        if host_details.fc_host_initiators is not None:
            fc_len = len(host_details.fc_host_initiators)
            if fc_len > 0:
                for count in range(fc_len):
                    """ get initiator 'wwn' id"""
                    ini_id \
                        = host_details.fc_host_initiators.initiator_id[count]

                    """ update existing_initiators list with 'wwn' """
                    existing_initiators.append(ini_id)

        if host_details.iscsi_host_initiators is not None:
            iscsi_len = len(host_details.iscsi_host_initiators)
            if iscsi_len > 0:
                for count in range(iscsi_len):
                    """ get initiator 'iqn' id"""
                    ini_id \
                        = host_details.iscsi_host_initiators.\
                        initiator_id[count]

                    """ update existing_initiators list with 'iqn' """
                    existing_initiators.append(ini_id)
        return existing_initiators

    def is_host_modified(self, host_details):
        """ Determines whether the Host details are to be updated or not """
        LOG.info("Checking host attribute values.")
        modified_flag = False

        if (self.module.params['description'] is not None
            and self.module.params['description']
            != host_details.description) \
                or (self.module.params['host_os'] is not None
                    and self.module.params['host_os'] != host_details.os_type) \
                or (self.module.params['new_host_name'] is not None
                    and self.module.params[
                        'new_host_name'] != host_details.name) \
                or (self.module.params['initiators'] is not None
                    and self.module.params['initiators']
                    != self.get_host_initiators_list(host_details)):
            LOG.info("Modification required.")
            modified_flag = True

        return modified_flag

    def modify_host(self, host_details, new_host_name=None, description=None,
                    host_os=None):
        """ Modify a host """
        try:
            hosts = utils.host.UnityHostList.get(cli=self.unity._cli)
            host_names_list = hosts.name
            # reject a rename that would collide with an existing host
            for name in host_names_list:
                if new_host_name == name:
                    error_message = "Cannot modify name, new_host_name: " \
                                    + new_host_name + " already in use."
                    LOG.error(error_message)
                    self.module.fail_json(msg=error_message)
            host_details.modify(name=new_host_name, desc=description,
                                os=host_os)
            return True

        except Exception as e:
            error_message = "Got error %s while modifying host %s" \
                            % (str(e), host_details.name)
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)

    def get_list_unmapped_initiators(self, initiators, host_id=None):
        """ Get the list of those initiators which are
        not mapped to any host"""

        unmapped_initiators = []
        for id in initiators:
            initiator_details = utils.host.UnityHostInitiatorList \
                .get(cli=self.unity._cli, initiator_id=id) \
                ._get_properties()

            """ if an already existing initiator is passed along with an
                unmapped initiator"""
            if None in initiator_details["parent_host"]:
                unmapped_initiators.append(initiator_details
                                           ["initiator_id"][0])
            elif not initiator_details["parent_host"]:
                unmapped_initiators.append(id)
            else:
                error_message = "Initiator " + id + " mapped to another Host."
                LOG.error(error_message)
                self.module.fail_json(msg=error_message)
        return unmapped_initiators

    def add_initiator_to_host(self, host_details, initiators):
        """ Add initiator to host """

        try:
            existing_initiators = self.get_host_initiators_list(host_details)

            # if current and existing initiators are same
            if initiators \
                    and (set(initiators).issubset(set(existing_initiators))):
                LOG.info("Initiators are already present in host: %s",
                         host_details.name)
                return False, host_details

            """ get the list of non-mapped initiators out of the
                given initiators"""
            host_id = host_details.id
            unmapped_initiators \
                = self.get_list_unmapped_initiators(initiators, host_id)

            """ if any of the Initiators is invalid or already mapped """
            if unmapped_initiators is None \
                    or len(unmapped_initiators) < len(initiators):
                error_message = "Provide valid initiators."
                LOG.error(error_message)
                self.module.fail_json(msg=error_message)

            LOG.info("Adding initiators to host %s", host_details.name)
            for id in unmapped_initiators:
                host_details.add_initiator(uid=id)
                updated_host \
                    = self.unity.get_host(name=host_details.name)
            return True, updated_host

        except Exception as e:
            error_message = "Got error %s while adding initiator to host %s" \
                            % (str(e), host_details.name)
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)

    def remove_initiator_from_host(self, host_details, initiators):
        """ Remove initiator from host """

        try:
            existing_initiators = self.get_host_initiators_list(host_details)

            if existing_initiators is None:
                LOG.info("No existing initiators in host: %s",
                         host_details.name)
                return False, host_details

            if not (set(initiators).issubset(set(existing_initiators))):
                LOG.info("Initiators already absent in host: %s",
                         host_details.name)
                return False, host_details

            LOG.info("Removing initiators from host %s", host_details.name)

            if len(initiators) > 1:
                self.check_if_initiators_logged_in(initiators)

            for id in initiators:
                initiator_details = utils.host.UnityHostInitiatorList \
                    .get(cli=self.unity._cli, initiator_id=id) \
                    ._get_properties()

                """ if initiator has no active paths, then remove it """
                if initiator_details["paths"][0] is None:
                    LOG.info("Initiator Path does not exist.")
                    host_details.delete_initiator(uid=id)
                    updated_host \
                        = self.unity.get_host(name=host_details.name)

                else:
                    """ Checking for initiator logged_in state """
                    for path in initiator_details["paths"][0]["UnityHostInitiatorPathList"]:
                        path_id = path["UnityHostInitiatorPath"]["id"]

                        path_id_obj = utils.host.UnityHostInitiatorPathList \
                            .get(cli=self.unity._cli, _id=path_id)

                        path_id_details = path_id_obj._get_properties()

                        """ if is_logged_in is True, can't remove initiator"""
                        if (path_id_details["is_logged_in"]):
                            error_message = "Cannot remove initiator "\
                                            + id + ", as it is logged in " \
                                            "with the host."
                            LOG.error(error_message)
                            self.module.fail_json(msg=error_message)

                        elif (not path_id_details["is_logged_in"]):
                            """ if is_logged_in is False, remove initiator """
                            path_id_obj.delete()

                        else:
                            """ if logged_in state does not exist """
                            error_message = " logged_in state does not " \
                                            "exist for initiator " + id + "."
                            LOG.error(error_message)
                            self.module.fail_json(msg=error_message)

                    host_details.delete_initiator(uid=id)
                    updated_host \
                        = self.unity.get_host(name=host_details.name)

            return True, updated_host

        except Exception as e:
            error_message = "Got error %s while removing initiator from " \
                            "host %s" \
                            % (str(e), host_details.name)
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)

    def check_if_initiators_logged_in(self, initiators):
        """ Checks if any of the initiators is of type logged-in"""

        for item in initiators:
            initiator_details = (utils.host.UnityHostInitiatorList
                                 .get(cli=self.unity._cli, initiator_id=item)
                                 ._get_properties())
            if initiator_details["paths"][0] is not None and "UnityHostInitiatorPathList" in initiator_details["paths"][0]:
                error_message = "Removal operation cannot be done since host has logged in initiator(s)"
                LOG.error(error_message)
                self.module.fail_json(msg=error_message)

    def delete_host(self, host_details):
        """ Delete an existing host """

        try:
            host_details.delete()
            return True
        except Exception as e:
            error_message = "Got error %s while deletion of host %s" \
                            % (str(e), host_details.name)
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)

    def get_iscsi_host_initiators_details(self, iscsi_host_initiators):
        """ Get the details of existing ISCSI initiators in host"""

        iscsi_initiator_list = []
        for iscsi in iscsi_host_initiators:
            iscsi_initiator_details = self.unity.get_initiator(_id=iscsi.id)
            iscsi_path_list = []
            if iscsi_initiator_details.paths is not None:
                for path in iscsi_initiator_details.paths:
                    iscsi_path_list.append({
                        'id': path.id,
                        'is_logged_in': path.is_logged_in
                    })
            iscsi_initiator_list.append({
                'id': iscsi_initiator_details.id,
                'name': iscsi_initiator_details.initiator_id,
                'paths': iscsi_path_list
            })
        return iscsi_initiator_list

    def get_host_network_address_list(self, host_details):
        """ Get the list of network addresses mapped to the host """
        network_address_list = []
        if host_details and host_details.host_ip_ports is not None:
            for port in host_details.host_ip_ports:
                network_address_list.append(port.address)
        return network_address_list

    def manage_network_address(self, host_details, network_address_list,
                               network_address, network_address_state):
        """ Add/remove a network address to/from the host, idempotently """
        try:
            is_mapped = False
            changed = False
            # case-insensitive lookup of the address among existing ones
            for addr in network_address_list:
                if addr.lower() == network_address.lower():
                    is_mapped = True
                    break
            if not is_mapped and network_address_state == 'present-in-host':
                LOG.info("Adding network address %s to Host %s", network_address,
                         host_details.name)
                host_details.add_ip_port(network_address)
                changed = True
            elif is_mapped and network_address_state == 'absent-in-host':
                LOG.info("Deleting network address %s from Host %s", network_address,
                         host_details.name)
                host_details.delete_ip_port(network_address)
                changed = True

            if changed:
                updated_host = self.unity.get_host(name=host_details.name)
                network_address_list = self.get_host_network_address_list(updated_host)
            return network_address_list, changed
        except Exception as e:
            error_message = "Got error %s while modifying network address %s of host %s" \
                            % (str(e), network_address, host_details.name)
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)

    def get_host_lun_list(self, host_details):
        """ Get luns attached to host"""
        host_luns_list = []
        if host_details and host_details.host_luns is not None:
            for lun in host_details.host_luns.lun:
                host_lun = {"name": lun.name, "id": lun.id}
                host_luns_list.append(host_lun)
        return host_luns_list

    def get_fc_host_initiators_details(self, fc_host_initiators):
        """ Get the details of existing FC initiators in host"""

        fc_initiator_list = []
        for fc in fc_host_initiators:
            fc_initiator_details = self.unity.get_initiator(_id=fc.id)
            fc_path_list = []
            if fc_initiator_details.paths is not None:
                for path in fc_initiator_details.paths:
                    fc_path_list.append({
                        'id': path.id,
                        'is_logged_in': path.is_logged_in
                    })
            fc_initiator_list.append({
                'id': fc_initiator_details.id,
                'name': fc_initiator_details.initiator_id,
                'paths': fc_path_list
            })
        return fc_initiator_list

    def perform_module_operation(self):
        """ Perform different actions on host based on user parameter
            chosen in playbook """

        host_name = self.module.params['host_name']
        host_id = self.module.params['host_id']
        description = self.module.params['description']
        host_os = self.module.params['host_os']
        new_host_name = self.module.params['new_host_name']
        initiator_state = self.module.params['initiator_state']
        initiators = self.module.params['initiators']
        network_address = self.module.params['network_address']
        network_address_state = self.module.params['network_address_state']
        state = self.module.params['state']

        if host_name and len(host_name) > 255:
            err_msg = "'host_name' is greater than 255 characters."
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)

        if new_host_name and len(new_host_name) > 255:
            err_msg = "'new_host_name' is greater than 255 characters."
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)

        if description and len(description) > 255:
            err_msg = "'description' is greater than 255 characters."
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)

        if not initiators and initiator_state:
            err_msg = "'initiator_state' is given, " \
                      "'initiators' are not specified"
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)

        if not initiator_state and initiators:
            err_msg = "'initiators' are given, " \
                      "'initiator_state' is not specified"
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)

        # result is a dictionary that contains changed status and
        # host details
        result = dict(
            changed=False,
            host_details={}
        )

        ''' Get host details based on host_name/host_id'''
        host_details = self.get_host_details(host_id, host_name)
        if not host_details and state == 'present':
            if host_id:
                err_msg = "Invalid argument 'host_id' while " \
                          "creating a host"
                LOG.error(err_msg)
                self.module.fail_json(msg=err_msg)
            if not host_name:
                err_msg = "host_name is required to create a host"
                LOG.error(err_msg)
                self.module.fail_json(msg=err_msg)
            if new_host_name:
                err_msg = "Invalid argument 'new_host_name' while " \
                          "creating a host"
                LOG.error(err_msg)
                self.module.fail_json(msg=err_msg)

            if (initiators and initiator_state == 'absent-in-host'):
                error_message = "Incorrect 'initiator_state' given."
                LOG.error(error_message)
                self.module.fail_json(msg=error_message)

            # Create new host
            LOG.info("Creating host: %s", host_name)
            result['changed'], host_details \
                = self.create_host(host_name)
            result['host_details'] = host_details._get_properties()

        # Modify host (Attributes and ADD/REMOVE Initiators)
        elif (state == 'present' and host_details):
            modified_flag = self.is_host_modified(host_details)
            if modified_flag:

                # Modify host
                result['changed'] = self.modify_host(host_details,
                                                     new_host_name,
                                                     description,
                                                     host_os)
                if new_host_name:
                    host_details = self.get_host_details(host_id,
                                                         new_host_name)
                else:
                    host_details = self.get_host_details(host_id, host_name)
                result['host_details'] = host_details._get_properties()

                # Add Initiators to host
                if (initiator_state == 'present-in-host' and initiators
                        and len(initiators) > 0):
                    LOG.info("Adding Initiators to Host %s",
                             host_details.name)
                    result['changed'], host_details \
                        = self.add_initiator_to_host(host_details, initiators)
                    result['host_details'] = host_details._get_properties()

            else:
                LOG.info('Host modification is not applicable, '
                         'as none of the attributes has changed.')
                result['changed'] = False
                result['host_details'] = host_details._get_properties()

        # Remove initiators from host
        if (host_details and initiator_state == 'absent-in-host'
                and initiators and len(initiators) > 0):
            LOG.info("Removing Initiators from Host %s",
                     host_details.name)
            result['changed'], host_details \
                = self.remove_initiator_from_host(host_details,
                                                  initiators)
            result['host_details'] = host_details._get_properties()

        """ display WWN/IQN w.r.t. initiators mapped to host,
            if host exists """
        if host_details and host_details.fc_host_initiators is not None:
            host_details.fc_host_initiators = self.get_fc_host_initiators_details(host_details.fc_host_initiators)
            result['host_details'] = host_details._get_properties()
        if host_details and host_details.iscsi_host_initiators is not None:
            host_details.iscsi_host_initiators = self.get_iscsi_host_initiators_details(host_details.iscsi_host_initiators)
            result['host_details'] = host_details._get_properties()

        ''' Get host luns details and network addresses'''
        if result['host_details']:
            result['host_details']['host_luns'] = self.get_host_lun_list(host_details)
            result['host_details']['network_addresses'] = self.get_host_network_address_list(host_details)
            if 'host_ip_ports' in result['host_details']:
                del result['host_details']['host_ip_ports']

        # manage network address
        if host_details is not None and network_address_state is not None:
            self.validate_network_address_params(network_address)
            network_address_list, changed = self.manage_network_address(
                host_details,
                result['host_details']['network_addresses'],
                network_address,
                network_address_state)
            result['host_details']['network_addresses'] = network_address_list
            result['changed'] = changed

        # Delete a host
        if state == 'absent':
            if host_details:
                LOG.info("Deleting host %s", host_details.name)
                result['changed'] = self.delete_host(host_details)
            else:
                result['changed'] = False
            result['host_details'] = []

        self.module.exit_json(**result)

    def validate_network_address_params(self, network_address):
        """ Validate that the network address is a proper IPv4 or host name """
        if '.' in network_address and not is_valid_ip(network_address):
            err_msg = 'Please enter valid IPV4 address for network address'
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)

        if len(network_address) < 1 or len(network_address) > 63:
            err_msg = "'network_address' should be in range of 1 to 63 characters."
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)

        if utils.has_special_char(network_address) or ' ' in network_address:
            err_msg = 'Please enter valid IPV4 address or host name for network address'
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)
+
+
def is_valid_ip(address):
    """Return True when *address* parses as a valid IP address."""
    try:
        ipaddress.ip_address(address)
    except ValueError:
        return False
    return True
+
+
def get_host_parameters():
    """Return the argument spec for the Unity host module."""
    membership_choices = ['present-in-host', 'absent-in-host']
    os_choices = ['AIX', 'Citrix XenServer', 'HP-UX', 'IBM VIOS', 'Linux',
                  'Mac OS', 'Solaris', 'VMware ESXi', 'Windows Client',
                  'Windows Server']
    return dict(
        host_name=dict(required=False, type='str'),
        host_id=dict(required=False, type='str'),
        description=dict(required=False, type='str'),
        host_os=dict(required=False, type='str', choices=os_choices),
        new_host_name=dict(required=False, type='str'),
        initiators=dict(required=False, type='list', elements='str'),
        initiator_state=dict(required=False, type='str',
                             choices=list(membership_choices)),
        network_address=dict(required=False, type='str'),
        network_address_state=dict(required=False, type='str',
                                   choices=list(membership_choices)),
        state=dict(required=True, type='str',
                   choices=['present', 'absent'])
    )
+
+
def main():
    """Entry point: build the Unity host module object and run the action
    requested by the playbook."""
    Host().perform_module_operation()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/info.py b/ansible_collections/dellemc/unity/plugins/modules/info.py
new file mode 100644
index 000000000..e89d86335
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/info.py
@@ -0,0 +1,1784 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for Gathering information about Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: info
+
+version_added: '1.1.0'
+
+short_description: Gathering information about Unity
+
+description:
+- Gathering information about Unity storage system includes
+ Get the details of Unity array,
+ Get list of Hosts in Unity array,
+ Get list of FC initiators in Unity array,
+ Get list of iSCSI initiators in Unity array,
+ Get list of Consistency groups in Unity array,
+ Get list of Storage pools in Unity array,
+ Get list of Volumes in Unity array,
+ Get list of Snapshot schedules in Unity array,
+ Get list of NAS servers in Unity array,
+ Get list of File systems in Unity array,
+ Get list of Snapshots in Unity array,
+ Get list of SMB shares in Unity array,
+ Get list of NFS exports in Unity array,
+ Get list of User quotas in Unity array,
+ Get list of Quota tree in Unity array,
+ Get list of NFS Servers in Unity array,
+  Get list of CIFS Servers in Unity array,
+  Get list of Ethernet ports in Unity array,
+  Get list of File interfaces used in Unity array.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Rajshree Khare (@kharer5) <ansible.team@dell.com>
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+
+options:
+ gather_subset:
+ description:
+ - List of string variables to specify the Unity storage system entities
+ for which information is required.
+ choices: [host, fc_initiator, iscsi_initiator, cg, storage_pool, vol,
+ snapshot_schedule, nas_server, file_system, snapshot, nfs_export,
+ smb_share, user_quota, tree_quota, disk_group, nfs_server, cifs_server, ethernet_port, file_interface]
+ type: list
+ elements: str
+
+notes:
+ - The I(check_mode) is supported.
+'''
+
+EXAMPLES = r'''
+ - name: Get detailed list of Unity entities
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - host
+ - fc_initiator
+ - iscsi_initiator
+ - cg
+ - storage_pool
+ - vol
+ - snapshot_schedule
+ - nas_server
+ - file_system
+ - snapshot
+ - nfs_export
+ - smb_share
+ - user_quota
+ - tree_quota
+ - disk_group
+ - nfs_server
+ - cifs_server
+ - ethernet_port
+ - file_interface
+
+ - name: Get information of Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+
+ - name: Get list of hosts on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - host
+
+ - name: Get list of FC initiators on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - fc_initiator
+
+ - name: Get list of ISCSI initiators on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - iscsi_initiator
+
+ - name: Get list of consistency groups on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - cg
+
+ - name: Get list of storage pools on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - storage_pool
+
+ - name: Get list of volumes on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - vol
+
+ - name: Get list of snapshot schedules on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - snapshot_schedule
+
+ - name: Get list of NAS Servers on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - nas_server
+
+ - name: Get list of File Systems on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - file_system
+
+ - name: Get list of Snapshots on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - snapshot
+
+ - name: Get list of NFS exports on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - nfs_export
+
+ - name: Get list of SMB shares on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - smb_share
+
+ - name: Get list of user quotas on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - user_quota
+
+ - name: Get list of quota trees on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - tree_quota
+
+ - name: Get list of disk groups on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - disk_group
+
+ - name: Get list of NFS Servers on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - nfs_server
+
+ - name: Get list of CIFS Servers on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - cifs_server
+
+ - name: Get list of ethernet ports on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - ethernet_port
+
+ - name: Get list of file interfaces on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - file_interface
+'''
+
+RETURN = r'''
+Array_Details:
+ description: Details of the Unity Array.
+ returned: always
+ type: dict
+ contains:
+ api_version:
+ description: The current api version of the Unity Array.
+ type: str
+ earliest_api_version:
+ description: The earliest api version of the Unity Array.
+ type: str
+ model:
+ description: The model of the Unity Array.
+ type: str
+ name:
+ description: The name of the Unity Array.
+ type: str
+ software_version:
+ description: The software version of the Unity Array.
+ type: str
+ sample: {
+ "api_version": "12.0",
+ "earliest_api_version": "4.0",
+ "existed": true,
+ "hash": 8766644083532,
+ "id": "0",
+ "model": "Unity 480",
+ "name": "APM00213404195",
+ "software_version": "5.2.1"
+ }
+
+Hosts:
+ description: Details of the hosts.
+ returned: When hosts exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the host.
+ type: str
+ name:
+ description: The name of the host.
+ type: str
+ sample: [
+ {
+ "auto_manage_type": "HostManageEnum.UNKNOWN",
+ "datastores": null,
+ "description": "",
+ "existed": true,
+ "fc_host_initiators": null,
+ "hash": 8762200072289,
+ "health": {
+ "UnityHealth": {
+ "hash": 8762200072352
+ }
+ },
+ "host_container": null,
+ "host_ip_ports": {
+ "UnityHostIpPortList": [
+ {
+ "UnityHostIpPort": {
+ "hash": 8762200072361
+ }
+ }
+ ]
+ },
+ "host_luns": null,
+ "host_polled_uuid": null,
+ "host_pushed_uuid": null,
+ "host_uuid": null,
+ "host_v_vol_datastore": null,
+ "id": "Host_2191",
+ "iscsi_host_initiators": null,
+ "last_poll_time": null,
+ "name": "10.225.2.153",
+ "os_type": "Linux",
+ "registration_type": null,
+ "storage_resources": null,
+ "tenant": null,
+ "type": "HostTypeEnum.HOST_MANUAL",
+ "vms": null
+ }
+ ]
+
+FC_initiators:
+ description: Details of the FC initiators.
+ returned: When FC initiator exist.
+ type: list
+ contains:
+ WWN:
+ description: The WWN of the FC initiator.
+ type: str
+ id:
+ description: The id of the FC initiator.
+ type: str
+ sample: [
+ {
+ "WWN": "20:00:00:0E:1E:E9:B8:FC:21:00:00:0E:1E:E9:B8:FC",
+ "id": "HostInitiator_3"
+ },
+ {
+ "WWN": "20:00:00:0E:1E:E9:B8:F7:21:00:00:0E:1E:E9:B8:F7",
+ "id": "HostInitiator_4"
+ }
+ ]
+
+ISCSI_initiators:
+ description: Details of the ISCSI initiators.
+ returned: When ISCSI initiators exist.
+ type: list
+ contains:
+ IQN:
+ description: The IQN of the ISCSI initiator.
+ type: str
+ id:
+ description: The id of the ISCSI initiator.
+ type: str
+ sample: [
+ {
+ "IQN": "iqn.1994-05.com.redhat:634d768090f",
+ "id": "HostInitiator_1"
+ },
+ {
+ "IQN": "iqn.1994-05.com.redhat:2835ba62cc6d",
+ "id": "HostInitiator_2"
+ }
+ ]
+
+Consistency_Groups:
+ description: Details of the Consistency Groups.
+ returned: When Consistency Groups exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the Consistency Group.
+ type: str
+ name:
+ description: The name of the Consistency Group.
+ type: str
+ sample: [
+ {
+ "advanced_dedup_status": "DedupStatusEnum.DISABLED",
+ "block_host_access": {
+ "UnityBlockHostAccessList": [
+ {
+ "UnityBlockHostAccess": {
+ "hash": 8745385821206
+ }
+ },
+ {
+ "UnityBlockHostAccess": {
+ "hash": 8745386530115
+ }
+ },
+ {
+ "UnityBlockHostAccess": {
+ "hash": 8745386530124
+ }
+ }
+ ]
+ },
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "data_reduction_status": "DataReductionStatusEnum.DISABLED",
+ "datastores": null,
+ "dedup_status": null,
+ "description": "CG has created with all parametres.",
+ "esx_filesystem_block_size": null,
+ "esx_filesystem_major_version": null,
+ "existed": true,
+ "filesystem": null,
+ "hash": 8745385801328,
+ "health": {
+ "UnityHealth": {
+ "hash": 8745386647098
+ }
+ },
+ "host_v_vol_datastore": null,
+ "id": "res_93",
+ "is_replication_destination": false,
+ "is_snap_schedule_paused": false,
+ "luns": {
+ "UnityLunList": [
+ {
+ "UnityLun": {
+ "hash": 8745389830024,
+ "id": "sv_64"
+ }
+ },
+ {
+ "UnityLun": {
+ "hash": 8745386526751,
+ "id": "sv_63"
+ }
+ }
+ ]
+ },
+ "metadata_size": 8858370048,
+ "metadata_size_allocated": 7516192768,
+ "name": "CG1_Ansible_Test_SS",
+ "per_tier_size_used": [
+ 11811160064,
+ 0,
+ 0
+ ],
+ "pools": {
+ "UnityPoolList": [
+ {
+ "UnityPool": {
+ "hash": 8745386552375,
+ "id": "pool_3"
+ }
+ }
+ ]
+ },
+ "relocation_policy": "TieringPolicyEnum.AUTOTIER",
+ "replication_type": "ReplicationTypeEnum.NONE",
+ "size_allocated": 99418112,
+ "size_total": 268435456000,
+ "size_used": null,
+ "snap_count": 1,
+ "snap_schedule": {
+ "UnitySnapSchedule": {
+ "hash": 8745386550224,
+ "id": "snapSch_66"
+ }
+ },
+ "snaps_size_allocated": 8888320,
+ "snaps_size_total": 108675072,
+ "thin_status": "ThinStatusEnum.TRUE",
+ "type": "StorageResourceTypeEnum.CONSISTENCY_GROUP",
+ "virtual_volumes": null,
+ "vmware_uuid": null
+ },
+ ]
+
+Storage_Pools:
+ description: Details of the Storage Pools.
+ returned: When Storage Pools exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the Storage Pool.
+ type: str
+ name:
+ description: The name of the Storage Pool.
+ type: str
+ sample: [
+ {
+ "alert_threshold": 70,
+ "creation_time": "2021-10-18 12:45:12+00:00",
+ "description": "",
+ "existed": true,
+ "harvest_state": "UsageHarvestStateEnum.PAUSED_COULD_NOT_REACH_HWM",
+ "hash": 8741501012399,
+ "health": {
+ "UnityHealth": {
+ "hash": 8741501012363
+ }
+ },
+ "id": "pool_2",
+ "is_all_flash": false,
+ "is_empty": false,
+ "is_fast_cache_enabled": false,
+ "is_harvest_enabled": true,
+ "is_snap_harvest_enabled": false,
+ "metadata_size_subscribed": 312458870784,
+ "metadata_size_used": 244544700416,
+ "name": "fastVP_pool",
+ "object_id": 12884901891,
+ "pool_fast_vp": {
+ "UnityPoolFastVp": {
+ "hash": 8741501228023
+ }
+ },
+ "pool_space_harvest_high_threshold": 95.0,
+ "pool_space_harvest_low_threshold": 85.0,
+ "pool_type": "StoragePoolTypeEnum.TRADITIONAL",
+ "raid_type": "RaidTypeEnum.RAID5",
+ "rebalance_progress": null,
+ "size_free": 2709855928320,
+ "size_subscribed": 2499805044736,
+ "size_total": 3291018690560,
+ "size_used": 455513956352,
+ "snap_size_subscribed": 139720515584,
+ "snap_size_used": 66002944,
+ "snap_space_harvest_high_threshold": 25.0,
+ "snap_space_harvest_low_threshold": 20.0,
+ "tiers": {
+ "UnityPoolTierList": [
+ {
+ "UnityPoolTier": {
+ "hash": 8741500996410
+ }
+ },
+ {
+ "UnityPoolTier": {
+ "hash": 8741501009430
+ }
+ },
+ {
+ "UnityPoolTier": {
+ "hash": 8741501009508
+ }
+ }
+ ]
+ }
+ },
+ ]
+
+Volumes:
+ description: Details of the Volumes.
+ returned: When Volumes exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the Volume.
+ type: str
+ name:
+ description: The name of the Volume.
+ type: str
+ sample: [
+ {
+ "current_node": "NodeEnum.SPB",
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "default_node": "NodeEnum.SPB",
+ "description": null,
+ "effective_io_limit_max_iops": null,
+ "effective_io_limit_max_kbps": null,
+ "existed": true,
+ "family_base_lun": {
+ "UnityLun": {
+ "hash": 8774260820794,
+ "id": "sv_27"
+ }
+ },
+ "family_clone_count": 0,
+ "hash": 8774260854260,
+ "health": {
+ "UnityHealth": {
+ "hash": 8774260812499
+ }
+ },
+ "host_access": {
+ "UnityBlockHostAccessList": [
+ {
+ "UnityBlockHostAccess": {
+ "hash": 8774260826387
+ }
+ }
+ ]
+ },
+ "id": "sv_27",
+ "io_limit_policy": null,
+ "is_advanced_dedup_enabled": false,
+ "is_compression_enabled": null,
+ "is_data_reduction_enabled": false,
+ "is_replication_destination": false,
+ "is_snap_schedule_paused": false,
+ "is_thin_clone": false,
+ "is_thin_enabled": false,
+ "metadata_size": 4294967296,
+ "metadata_size_allocated": 4026531840,
+ "name": "VSI-UNITY-test-task",
+ "per_tier_size_used": [
+ 111400714240,
+ 0,
+ 0
+ ],
+ "pool": {
+ "UnityPool": {
+ "hash": 8774260811427
+ }
+ },
+ "size_allocated": 107374182400,
+ "size_total": 107374182400,
+ "size_used": null,
+ "snap_count": 0,
+ "snap_schedule": null,
+ "snap_wwn": "60:06:01:60:5C:F0:50:00:94:3E:91:4D:51:5A:4F:97",
+ "snaps_size": 0,
+ "snaps_size_allocated": 0,
+ "storage_resource": {
+ "UnityStorageResource": {
+ "hash": 8774267822228
+ }
+ },
+ "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH",
+ "type": "LUNTypeEnum.VMWARE_ISCSI",
+ "wwn": "60:06:01:60:5C:F0:50:00:00:B5:95:61:2E:34:DB:B2"
+ },
+ ]
+
+Snapshot_Schedules:
+ description: Details of the Snapshot Schedules.
+ returned: When Snapshot Schedules exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the Snapshot Schedule.
+ type: str
+ name:
+ description: The name of the Snapshot Schedule.
+ type: str
+ sample: [
+ {
+ "existed": true,
+ "hash": 8775599492651,
+ "id": "snapSch_1",
+ "is_default": true,
+ "is_modified": null,
+ "is_sync_replicated": false,
+ "luns": null,
+ "modification_time": "2021-08-18 19:10:33.774000+00:00",
+ "name": "CEM_DEFAULT_SCHEDULE_DEFAULT_PROTECTION",
+ "rules": {
+ "UnitySnapScheduleRuleList": [
+ {
+ "UnitySnapScheduleRule": {
+ "hash": 8775599498593
+ }
+ }
+ ]
+ },
+ "storage_resources": {
+ "UnityStorageResourceList": [
+ {
+ "UnityStorageResource": {
+ "hash": 8775599711597,
+ "id": "res_88"
+ }
+ },
+ {
+ "UnityStorageResource": {
+ "hash": 8775599711528,
+ "id": "res_3099"
+ }
+ }
+ ]
+ },
+ "version": "ScheduleVersionEnum.LEGACY"
+ },
+ ]
+
+NAS_Servers:
+ description: Details of the NAS Servers.
+ returned: When NAS Servers exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the NAS Server.
+ type: str
+ name:
+ description: The name of the NAS Server.
+ type: str
+ sample: [
+ {
+ "allow_unmapped_user": null,
+ "cifs_server": null,
+ "current_sp": {
+ "UnityStorageProcessor": {
+ "hash": 8747629920422,
+ "id": "spb"
+ }
+ },
+ "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NONE",
+ "default_unix_user": null,
+ "default_windows_user": null,
+ "existed": true,
+ "file_dns_server": null,
+ "file_interface": {
+ "UnityFileInterfaceList": [
+ {
+ "UnityFileInterface": {
+ "hash": 8747626606870,
+ "id": "if_6"
+ }
+ }
+ ]
+ },
+ "filesystems": {
+ "UnityFileSystemList": [
+ {
+ "UnityFileSystem": {
+ "hash": 8747625901355,
+ "id": "fs_6892"
+ }
+ },
+ ]
+ },
+ "hash": 8747625900370,
+ "health": {
+ "UnityHealth": {
+ "hash": 8747625900493
+ }
+ },
+ "home_sp": {
+ "UnityStorageProcessor": {
+ "hash": 8747625877420,
+ "id": "spb"
+ }
+ },
+ "id": "nas_1",
+ "is_backup_only": false,
+ "is_multi_protocol_enabled": false,
+ "is_packet_reflect_enabled": false,
+ "is_replication_destination": false,
+ "is_replication_enabled": false,
+ "is_windows_to_unix_username_mapping_enabled": null,
+ "name": "lglad072",
+ "pool": {
+ "UnityPool": {
+ "hash": 8747629920479,
+ "id": "pool_3"
+ }
+ },
+ "preferred_interface_settings": {
+ "UnityPreferredInterfaceSettings": {
+ "hash": 8747626625166,
+ "id": "preferred_if_1"
+ }
+ },
+ "replication_type": "ReplicationTypeEnum.NONE",
+ "size_allocated": 2952790016,
+ "tenant": null,
+ "virus_checker": {
+ "UnityVirusChecker": {
+ "hash": 8747626604144,
+ "id": "cava_1"
+ }
+ }
+ },
+ ]
+
+File_Systems:
+ description: Details of the File Systems.
+ returned: When File Systems exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the File System.
+ type: str
+ name:
+ description: The name of the File System.
+ type: str
+ sample: [
+ {
+ "access_policy": "AccessPolicyEnum.UNIX",
+ "cifs_notify_on_change_dir_depth": 512,
+ "cifs_share": null,
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "description": "",
+ "existed": true,
+ "folder_rename_policy": "FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN",
+ "format": "FSFormatEnum.UFS64",
+ "hash": 8786518053735,
+ "health": {
+ "UnityHealth": {
+ "hash": 8786518049091
+ }
+ },
+ "host_io_size": "HostIOSizeEnum.GENERAL_8K",
+ "id": "fs_12",
+ "is_advanced_dedup_enabled": false,
+ "is_cifs_notify_on_access_enabled": false,
+ "is_cifs_notify_on_write_enabled": false,
+ "is_cifs_op_locks_enabled": true,
+ "is_cifs_sync_writes_enabled": false,
+ "is_data_reduction_enabled": false,
+ "is_read_only": false,
+ "is_smbca": false,
+ "is_thin_enabled": true,
+ "locking_policy": "FSLockingPolicyEnum.MANDATORY",
+ "metadata_size": 4294967296,
+ "metadata_size_allocated": 3758096384,
+ "min_size_allocated": 0,
+ "name": "vro-daniel-test",
+ "nas_server": {
+ "UnityNasServer": {
+ "hash": 8786517296113,
+ "id": "nas_1"
+ }
+ },
+ "nfs_share": null,
+ "per_tier_size_used": [
+ 6442450944,
+ 0,
+ 0
+ ],
+ "pool": {
+ "UnityPool": {
+ "hash": 8786518259493,
+ "id": "pool_3"
+ }
+ },
+ "pool_full_policy": "ResourcePoolFullPolicyEnum.FAIL_WRITES",
+ "size_allocated": 283148288,
+ "size_allocated_total": 4041244672,
+ "size_preallocated": 2401206272,
+ "size_total": 107374182400,
+ "size_used": 1620312064,
+ "snap_count": 0,
+ "snaps_size": 0,
+ "snaps_size_allocated": 0,
+ "storage_resource": {
+ "UnityStorageResource": {
+ "hash": 8786518044167,
+ "id": "res_20"
+ }
+ },
+ "supported_protocols": "FSSupportedProtocolEnum.NFS",
+ "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH",
+ "type": "FilesystemTypeEnum.FILESYSTEM"
+ },
+ ]
+
+Snapshots:
+ description: Details of the Snapshots.
+ returned: When Snapshots exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the Snapshot.
+ type: str
+ name:
+ description: The name of the Snapshot.
+ type: str
+ sample: [
+ {
+ "access_type": "FilesystemSnapAccessTypeEnum.CHECKPOINT",
+ "attached_wwn": null,
+ "creation_time": "2022-04-06 11:19:26.818000+00:00",
+ "creator_schedule": null,
+ "creator_type": "SnapCreatorTypeEnum.REP_V2",
+ "creator_user": null,
+ "description": "",
+ "existed": true,
+ "expiration_time": null,
+ "hash": 8739100256648,
+ "host_access": null,
+ "id": "38654716464",
+ "io_limit_policy": null,
+ "is_auto_delete": false,
+ "is_modifiable": false,
+ "is_modified": false,
+ "is_read_only": true,
+ "is_system_snap": true,
+ "last_writable_time": null,
+ "lun": {
+ "UnityLun": {
+ "hash": 8739100148962,
+ "id": "sv_301"
+ }
+ },
+ "name": "42949677504_APM00213404195_0000.ckpt000_9508038064690266.2_238",
+ "parent_snap": null,
+ "size": 3221225472,
+ "snap_group": null,
+ "state": "SnapStateEnum.READY",
+ "storage_resource": {
+ "UnityStorageResource": {
+ "hash": 8739100173002,
+ "id": "sv_301"
+ }
+ }
+ },
+ ]
+
+NFS_Exports:
+ description: Details of the NFS Exports.
+ returned: When NFS Exports exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the NFS Export.
+ type: str
+ name:
+ description: The name of the NFS Export.
+ type: str
+ sample: [
+ {
+ "anonymous_gid": 4294967294,
+ "anonymous_uid": 4294967294,
+ "creation_time": "2021-12-01 06:21:48.381000+00:00",
+ "default_access": "NFSShareDefaultAccessEnum.NO_ACCESS",
+ "description": "",
+ "existed": true,
+ "export_option": 1,
+ "export_paths": [
+ "10.230.24.20:/zack_nfs_01"
+ ],
+ "filesystem": {
+ "UnityFileSystem": {
+ "hash": 8747298565566,
+ "id": "fs_67"
+ }
+ },
+ "hash": 8747298565548,
+ "host_accesses": null,
+ "id": "NFSShare_29",
+ "is_read_only": null,
+ "min_security": "NFSShareSecurityEnum.SYS",
+ "modification_time": "2022-04-01 11:44:17.553000+00:00",
+ "name": "zack_nfs_01",
+ "nfs_owner_username": null,
+ "no_access_hosts": null,
+ "no_access_hosts_string": "10.226.198.207,10.226.198.25,10.226.198.44,10.226.198.85,Host1,
+Host2,Host4,Host5,Host6,10.10.0.0/255.255.240.0",
+ "path": "/",
+ "read_only_hosts": null,
+ "read_only_hosts_string": "",
+ "read_only_root_access_hosts": null,
+ "read_only_root_hosts_string": "",
+ "read_write_hosts": null,
+ "read_write_hosts_string": "",
+ "read_write_root_hosts_string": "",
+ "role": "NFSShareRoleEnum.PRODUCTION",
+ "root_access_hosts": null,
+ "snap": null,
+ "type": "NFSTypeEnum.NFS_SHARE"
+ },
+ ]
+
+SMB_Shares:
+ description: Details of the SMB Shares.
+ returned: When SMB Shares exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the SMB Share.
+ type: str
+ name:
+ description: The name of the SMB Share.
+ type: str
+ sample: [
+ {
+ "creation_time": "2022-03-17 11:56:54.867000+00:00",
+ "description": "",
+ "existed": true,
+ "export_paths": [
+ "\\\\multi-prot-pie.extreme1.com\\multi-prot-hui",
+ "\\\\10.230.24.26\\multi-prot-hui"
+ ],
+ "filesystem": {
+ "UnityFileSystem": {
+ "hash": 8741295638110,
+ "id": "fs_140"
+ }
+ },
+ "hash": 8741295638227,
+ "id": "SMBShare_20",
+ "is_abe_enabled": false,
+ "is_ace_enabled": false,
+ "is_branch_cache_enabled": false,
+ "is_continuous_availability_enabled": false,
+ "is_dfs_enabled": false,
+ "is_encryption_enabled": false,
+ "is_read_only": null,
+ "modified_time": "2022-03-17 11:56:54.867000+00:00",
+ "name": "multi-prot-hui",
+ "offline_availability": "CifsShareOfflineAvailabilityEnum.NONE",
+ "path": "/",
+ "snap": null,
+ "type": "CIFSTypeEnum.CIFS_SHARE",
+ "umask": "022"
+ },
+ ]
+
+User_Quotas:
+ description: Details of the user quotas.
+ returned: When user quotas exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the user quota.
+ type: str
+ uid:
+ description: The UID of the user quota.
+ type: str
+ sample: [
+ {
+ "id": "userquota_171798694698_0_60000",
+ "uid": 60000
+ },
+ {
+ "id": "userquota_171798694939_0_5001",
+ "uid": 5001
+ }
+ ]
+
+Tree_Quotas:
+ description: Details of the quota trees.
+ returned: When quota trees exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the quota tree.
+ type: str
+ path:
+ description: The path of the quota tree.
+ type: str
+ sample: [
+ {
+ "id": "treequota_171798709589_1",
+ "path": "/vro-ui-fs-rkKfimmN"
+ },
+ {
+ "id": "treequota_171798709590_1",
+ "path": "/vro-ui-fs-mGYXAMqk"
+ }
+ ]
+
+Disk_Groups:
+ description: Details of the disk groups.
+ returned: When disk groups exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the disk group.
+ type: str
+ name:
+ description: The name of the disk group.
+ type: str
+ tier_type:
+ description: The tier type of the disk group.
+ type: str
+ sample: [
+ {
+ "id": "dg_3",
+ "name": "400 GB SAS Flash 2",
+ "tier_type": "EXTREME_PERFORMANCE"
+ },
+ {
+ "id": "dg_16",
+ "name": "600 GB SAS 10K",
+ "tier_type": "PERFORMANCE"
+ }
+ ]
+
+NFS_Servers:
+ description: Details of the NFS Servers.
+ returned: When NFS Servers exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the NFS Servers.
+ type: str
+ sample: [
+ {
+ "id": "nfs_3",
+ },
+ {
+ "id": "nfs_4",
+ },
+ {
+ "id": "nfs_9",
+ }
+ ]
+CIFS_Servers:
+ description: Details of the CIFS Servers.
+ returned: When CIFS Servers exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the CIFS Servers.
+ type: str
+ name:
+ description: The name of the CIFS server.
+ type: str
+ sample: [
+ {
+ "id": "cifs_3",
+ "name": "test_cifs_1"
+ },
+ {
+ "id": "cifs_4",
+ "name": "test_cifs_2"
+ },
+ {
+ "id": "cifs_9",
+ "name": "test_cifs_3"
+ }
+ ]
+Ethernet_ports:
+ description: Details of the ethernet ports.
+ returned: When ethernet ports exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the ethernet port.
+ type: str
+ name:
+ description: The name of the ethernet port.
+ type: str
+ sample: [
+ {
+ "id": "spa_mgmt",
+ "name": "SP A Management Port"
+ },
+ {
+ "id": "spa_ocp_0_eth0",
+ "name": "SP A 4-Port Card Ethernet Port 0"
+ },
+ {
+ "id": "spa_ocp_0_eth1",
+ "name": "SP A 4-Port Card Ethernet Port 1"
+ }
+ ]
+File_interfaces:
+    description: Details of the file interfaces.
+    returned: When file interfaces exist.
+    type: list
+    contains:
+        id:
+            description: The ID of the file interface.
+            type: str
+        name:
+            description: The name of the file interface.
+            type: str
+        ip_address:
+            description: IP address of the file interface.
+            type: str
+ sample: [
+ {
+ "id": "if_3",
+ "ip_address": "xx.xx.xx.xx",
+ "name": "1_APMXXXXXXXXXX"
+ },
+ {
+ "id": "if_3",
+ "ip_address": "xx.xx.xx.xx",
+ "name": "2_APMXXXXXXXXXX"
+ },
+ {
+ "id": "if_3",
+ "ip_address": "xx.xx.xx.xx",
+ "name": "3_APMXXXXXXXXXX"
+ }
+ ]
+'''
+
+from re import sub
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
# Module-level logger shared by the Info class below.
LOG = utils.get_logger('info')
# NOTE(review): constant name misspells "SUCCESSFUL"; it is referenced
# elsewhere in this module, so renaming here would break those uses.
SUCCESSFULL_LISTED_MSG = 'Successfully listed.'

# Client identifier passed to the Unisphere connection (see Info.__init__).
application_type = "Ansible/1.6.0"
+
+
+class Info(object):
+ """Class with Info operations"""
+
    def __init__(self):
        """Build the argument spec, create the AnsibleModule and open the
        Unisphere connection used by all the get_*_list helpers."""

        # Base management-host parameters plus this module's own options.
        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_info_parameters())

        """ initialize the ansible module """
        # Info is read-only, so check mode is fully supported.
        self.module = AnsibleModule(argument_spec=self.module_params,
                                    supports_check_mode=True)
        utils.ensure_required_libs(self.module)

        # Shared Unisphere connection; application_type identifies this
        # client to the array.
        self.unity = utils.get_unity_unisphere_connection(self.module.params,
                                                          application_type)
        LOG.info('Got the unity instance for provisioning on Unity')
+
+ def get_array_details(self):
+ """ Get the list of snapshot schedules on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting array details ')
+ array_details = self.unity.info
+ return array_details._get_properties()
+
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ msg = 'Incorrect username or password provided.'
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ else:
+ msg = str(e)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except Exception as e:
+ msg = 'Get array details from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_hosts_list(self):
+ """ Get the list of hosts on a given Unity storage system """
+
+ try:
+ LOG.info('Getting hosts list ')
+ hosts = self.unity.get_host()
+ return result_list(hosts)
+
+ except Exception as e:
+ msg = 'Get hosts list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_fc_initiators_list(self):
+ """ Get the list of FC Initiators on a given Unity storage system """
+
+ try:
+ LOG.info('Getting FC initiators list ')
+ fc_initiator = utils.host.UnityHostInitiatorList \
+ .get(cli=self.unity._cli, type=utils.HostInitiatorTypeEnum.FC)
+ return fc_initiators_result_list(fc_initiator)
+
+ except Exception as e:
+ msg = 'Get FC initiators list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_iscsi_initiators_list(self):
+ """ Get the list of ISCSI initiators on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting ISCSI initiators list ')
+ iscsi_initiator = utils.host.UnityHostInitiatorList \
+ .get(cli=self.unity._cli, type=utils.HostInitiatorTypeEnum.
+ ISCSI)
+ return iscsi_initiators_result_list(iscsi_initiator)
+
+ except Exception as e:
+ msg = 'Get ISCSI initiators list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_consistency_groups_list(self):
+ """ Get the list of consistency groups on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting consistency groups list ')
+ consistency_groups = utils.cg.UnityConsistencyGroupList \
+ .get(self.unity._cli)
+ return result_list(consistency_groups)
+
+ except Exception as e:
+ msg = 'Get consistency groups list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_storage_pools_list(self):
+ """ Get the list of storage pools on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting storage pools list ')
+ storage_pools = self.unity.get_pool()
+ return result_list(storage_pools)
+
+ except Exception as e:
+ msg = 'Get storage pools list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_volumes_list(self):
+ """ Get the list of volumes on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting volumes list ')
+ volumes = self.unity.get_lun()
+ return result_list(volumes)
+
+ except Exception as e:
+ msg = 'Get volumes list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_snapshot_schedules_list(self):
+ """ Get the list of snapshot schedules on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting snapshot schedules list ')
+ snapshot_schedules = utils.snap_schedule.UnitySnapScheduleList \
+ .get(cli=self.unity._cli)
+ return result_list(snapshot_schedules)
+
+ except Exception as e:
+ msg = 'Get snapshot schedules list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_nas_servers_list(self):
+ """Get the list of NAS servers on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting NAS servers list")
+ nas_servers = self.unity.get_nas_server()
+ return result_list(nas_servers)
+
+ except Exception as e:
+ msg = 'Get NAS servers list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_file_systems_list(self):
+ """Get the list of file systems on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting file systems list")
+ file_systems = self.unity.get_filesystem()
+ return result_list(file_systems)
+
+ except Exception as e:
+ msg = 'Get file systems list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_snapshots_list(self):
+ """Get the list of snapshots on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting snapshots list")
+ snapshots = self.unity.get_snap()
+ return result_list(snapshots)
+
+ except Exception as e:
+ msg = 'Get snapshots from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_nfs_exports_list(self):
+ """Get the list of NFS exports on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting NFS exports list")
+ nfs_exports = self.unity.get_nfs_share()
+ return result_list(nfs_exports)
+
+ except Exception as e:
+ msg = 'Get NFS exports from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_smb_shares_list(self):
+ """Get the list of SMB shares on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting SMB shares list")
+ smb_shares = self.unity.get_cifs_share()
+ return result_list(smb_shares)
+
+ except Exception as e:
+ msg = 'Get SMB shares from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_user_quota_list(self):
+ """Get the list of user quotas on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting user quota list")
+ user_quotas = self.unity.get_user_quota()
+ return user_quota_result_list(user_quotas)
+
+ except Exception as e:
+ msg = 'Get user quotas from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_tree_quota_list(self):
+ """Get the list of quota trees on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting quota tree list")
+ tree_quotas = self.unity.get_tree_quota()
+ return tree_quota_result_list(tree_quotas)
+
+ except Exception as e:
+ msg = 'Get quota trees from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_disk_groups_list(self):
+ """Get the list of disk group details on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting disk group list")
+ pool_disk_list = []
+ disk_instances = utils.UnityDiskGroupList(cli=self.unity._cli)
+ if disk_instances:
+ for disk in disk_instances:
+ pool_disk = {"id": disk.id, "name": disk.name,
+ "tier_type": disk.tier_type.name}
+ pool_disk_list.append(pool_disk)
+ return pool_disk_list
+ except Exception as e:
+ msg = 'Get disk group from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_nfs_server_list(self):
+ """Get the list of NFS servers on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting NFS servers list")
+ nfs_servers = self.unity.get_nfs_server()
+ return nfs_server_result_list(nfs_servers)
+
+ except Exception as e:
+ msg = 'Get NFS servers list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_cifs_server_list(self):
+ """Get the list of CIFS servers on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting CIFS servers list")
+ cifs_servers = self.unity.get_cifs_server()
+ return result_list(cifs_servers)
+
+ except Exception as e:
+ msg = 'Get CIFS servers list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_ethernet_port_list(self):
+ """Get the list of ethernet ports on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting ethernet ports list")
+ ethernet_port = self.unity.get_ethernet_port()
+ return result_list(ethernet_port)
+
+ except Exception as e:
+ msg = 'Get ethernet port list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_file_interface_list(self):
+ """Get the list of file interfaces on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting file interfaces list")
+ file_interface = self.unity.get_file_interface()
+ return file_interface_result_list(file_interface)
+
+ except Exception as e:
+ msg = 'Get file interface list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def perform_module_operation(self):
+ """ Perform different actions on Info based on user parameter
+ chosen in playbook """
+
+ """ Get the array details a given Unity storage system """
+
+ array_details = self.get_array_details()
+ host = []
+ fc_initiator = []
+ iscsi_initiator = []
+ cg = []
+ storage_pool = []
+ vol = []
+ snapshot_schedule = []
+ nas_server = []
+ file_system = []
+ snapshot = []
+ nfs_export = []
+ smb_share = []
+ user_quota = []
+ tree_quota = []
+ disk_group = []
+ nfs_server = []
+ cifs_server = []
+ ethernet_port = []
+ file_interface = []
+
+ subset = self.module.params['gather_subset']
+ if subset is not None:
+ if 'host' in subset:
+ host = self.get_hosts_list()
+ if 'fc_initiator' in subset:
+ fc_initiator = self.get_fc_initiators_list()
+ if 'iscsi_initiator' in subset:
+ iscsi_initiator = self.get_iscsi_initiators_list()
+ if 'cg' in subset:
+ cg = self.get_consistency_groups_list()
+ if 'storage_pool' in subset:
+ storage_pool = self.get_storage_pools_list()
+ if 'vol' in subset:
+ vol = self.get_volumes_list()
+ if 'snapshot_schedule' in subset:
+ snapshot_schedule = self.get_snapshot_schedules_list()
+ if 'nas_server' in subset:
+ nas_server = self.get_nas_servers_list()
+ if 'file_system' in subset:
+ file_system = self.get_file_systems_list()
+ if 'snapshot' in subset:
+ snapshot = self.get_snapshots_list()
+ if 'nfs_export' in subset:
+ nfs_export = self.get_nfs_exports_list()
+ if 'smb_share' in subset:
+ smb_share = self.get_smb_shares_list()
+ if 'user_quota' in subset:
+ user_quota = self.get_user_quota_list()
+ if 'tree_quota' in subset:
+ tree_quota = self.get_tree_quota_list()
+ if 'disk_group' in subset:
+ disk_group = self.get_disk_groups_list()
+ if 'nfs_server' in subset:
+ nfs_server = self.get_nfs_server_list()
+ if 'cifs_server' in subset:
+ cifs_server = self.get_cifs_server_list()
+ if 'ethernet_port' in subset:
+ ethernet_port = self.get_ethernet_port_list()
+ if 'file_interface' in subset:
+ file_interface = self.get_file_interface_list()
+
+ self.module.exit_json(
+ Array_Details=array_details,
+ Hosts=host,
+ FC_initiators=fc_initiator,
+ ISCSI_initiators=iscsi_initiator,
+ Consistency_Groups=cg,
+ Storage_Pools=storage_pool,
+ Volumes=vol,
+ Snapshot_Schedules=snapshot_schedule,
+ NAS_Servers=nas_server,
+ File_Systems=file_system,
+ Snapshots=snapshot,
+ NFS_Exports=nfs_export,
+ SMB_Shares=smb_share,
+ User_Quotas=user_quota,
+ Tree_Quotas=tree_quota,
+ Disk_Groups=disk_group,
+ NFS_Servers=nfs_server,
+ CIFS_Servers=cifs_server,
+ Ethernet_ports=ethernet_port,
+ File_interfaces=file_interface
+ )
+
+
+def result_list(entity):
+ """ Get the name and id associated with the Unity entities """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULL_LISTED_MSG)
+ for item in entity:
+ result.append(
+ item._get_properties()
+ )
+ return result
+ else:
+ return None
+
+
+def fc_initiators_result_list(entity):
+ """ Get the WWN and id associated with the Unity FC initiators """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULL_LISTED_MSG)
+ for item in entity:
+ result.append(
+ {
+ "WWN": item.initiator_id,
+ "id": item.id
+ }
+ )
+ return result
+ else:
+ return None
+
+
+def iscsi_initiators_result_list(entity):
+ """ Get the IQN and id associated with the Unity ISCSI initiators """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULL_LISTED_MSG)
+ for item in entity:
+ result.append(
+ {
+ "IQN": item.initiator_id,
+ "id": item.id
+ }
+ )
+ return result
+ else:
+ return None
+
+
+def user_quota_result_list(entity):
+ """ Get the id and uid associated with the Unity user quotas """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULL_LISTED_MSG)
+ for item in entity:
+ result.append(
+ {
+ "uid": item.uid,
+ "id": item.id
+ }
+ )
+ return result
+ else:
+ return None
+
+
+def tree_quota_result_list(entity):
+ """ Get the id and path associated with the Unity quota trees """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULL_LISTED_MSG)
+ for item in entity:
+ result.append(
+ {
+ "path": item.path,
+ "id": item.id
+ }
+ )
+ return result
+ else:
+ return None
+
+
+def nfs_server_result_list(entity):
+ """ Get the id of NFS Server """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULL_LISTED_MSG)
+ for item in entity:
+ result.append(
+ item._get_properties()
+ )
+ return result
+ else:
+ return None
+
+
+def file_interface_result_list(entity):
+ """ Get the id, name and IP of File Interfaces """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULL_LISTED_MSG)
+ for item in entity:
+ result.append(
+ item._get_properties()
+ )
+ return result
+ else:
+ return None
+
+
+def get_info_parameters():
+ """This method provides parameters required for the ansible
+ info module on Unity"""
+ return dict(gather_subset=dict(type='list', required=False,
+ elements='str',
+ choices=['host', 'fc_initiator',
+ 'iscsi_initiator', 'cg',
+ 'storage_pool', 'vol',
+ 'snapshot_schedule', 'nas_server',
+ 'file_system', 'snapshot',
+ 'nfs_export', 'smb_share',
+ 'user_quota', 'tree_quota', 'disk_group', 'nfs_server', 'cifs_server',
+ 'ethernet_port', 'file_interface']))
+
+
+def main():
+ """ Create Unity Info object and perform action on it
+ based on user input from playbook"""
+ obj = Info()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/interface.py b/ansible_collections/dellemc/unity/plugins/modules/interface.py
new file mode 100644
index 000000000..95ddfd26a
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/interface.py
@@ -0,0 +1,531 @@
+#!/usr/bin/python
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing Interfaces on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: interface
+version_added: '1.4.0'
+short_description: Manage Interfaces on Unity storage system
+description:
+- Managing the Interfaces on the Unity storage system includes adding Interfaces to NAS Server, getting
+ details of interface and deleting configured interfaces.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+
+options:
+ nas_server_name:
+ description:
+ - Name of the NAS server for which interface will be configured.
+ type: str
+ nas_server_id:
+ description:
+ - ID of the NAS server for which interface will be configured.
+ type: str
+ ethernet_port_name:
+ description:
+ - Name of the ethernet port.
+ type: str
+ ethernet_port_id:
+ description:
+ - ID of the ethernet port.
+ type: str
+ role:
+ description:
+ - Indicates whether interface is configured as production or backup.
+ choices: [PRODUCTION, BACKUP]
+ type: str
+ interface_ip:
+ description:
+ - IP of network interface.
+ required: true
+ type: str
+ netmask:
+ description:
+ - Netmask of network interface.
+ type: str
+  prefix_length:
+    description:
+    - Prefix length of the network interface. It is mutually exclusive with I(netmask).
+    type: int
+ gateway:
+ description:
+ - Gateway of network interface.
+ type: str
+  vlan_id:
+    description:
+    - VLAN ID of the interface.
+    type: int
+ state:
+ description:
+ - Define whether the interface should exist or not.
+ choices: [present, absent]
+ required: true
+ type: str
+notes:
+- The I(check_mode) is supported.
+- Modify operation for interface is not supported.
+'''
+
+EXAMPLES = r'''
+
+ - name: Add Interface as Backup to NAS Server
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ ethernet_port_name: "SP A 4-Port Card Ethernet Port 0"
+ role: "BACKUP"
+ interface_ip: "xx.xx.xx.xx"
+ netmask: "xx.xx.xx.xx"
+ gateway: "xx.xx.xx.xx"
+ vlan_id: 324
+ state: "present"
+
+ - name: Add Interface as Production to NAS Server
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ ethernet_port_name: "SP A 4-Port Card Ethernet Port 0"
+ role: "PRODUCTION"
+ interface_ip: "xx.xx.xx.xx"
+ netmask: "xx.xx.xx.xx"
+ gateway: "xx.xx.xx.xx"
+ vlan_id: 324
+ state: "present"
+
+ - name: Get interface details
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ interface_ip: "xx.xx.xx.xx"
+ state: "present"
+
+ - name: Delete Interface
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ interface_ip: "xx.xx.xx.xx"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+interface_details:
+ description: Details of the interface.
+ returned: When interface is configured for NAS Server.
+ type: dict
+ contains:
+ existed:
+ description: Indicates if interface exists.
+ type: bool
+ gateway:
+ description: Gateway of network interface.
+ type: str
+    id:
+      description: Unique identifier of the interface.
+      type: str
+ ip_address:
+ description: IP address of interface.
+ type: str
+ ip_port:
+ description: Port on which network interface is configured.
+ type: dict
+ contains:
+ id:
+ description: ID of ip_port.
+ type: str
+ ip_protocol_version:
+ description: IP protocol version.
+ type: str
+ is_disabled:
+ description: Indicates whether interface is disabled.
+ type: bool
+ is_preferred:
+ description: Indicates whether interface is preferred.
+ type: bool
+    mac_address:
+      description: Mac address of ip_port.
+      type: str
+    name:
+      description: System configured name of interface.
+      type: str
+ nas_server:
+ description: Details of NAS server where interface is configured.
+ type: dict
+ contains:
+ id:
+ description: ID of NAS Server.
+ type: str
+ sample: {
+ "existed": true,
+ "gateway": "xx.xx.xx.xx",
+ "hash": 8785300560421,
+ "health": {
+ "UnityHealth": {
+ "hash": 8785300565468
+ }
+ },
+ "id": "if_69",
+ "ip_address": "10.10.10.10",
+ "ip_port": {
+ "UnityIpPort": {
+ "hash": 8785300565300,
+ "id": "spb_ocp_0_eth0"
+ }
+ },
+ "ip_protocol_version": "IpProtocolVersionEnum.IPv4",
+ "is_disabled": false,
+ "is_preferred": true,
+ "mac_address": "0C:48:C6:9F:57:BF",
+ "name": "36_APM00213404194",
+ "nas_server": {
+ "UnityNasServer": {
+ "hash": 8785300565417,
+ "id": "nas_10"
+ }
+ },
+ "netmask": "10.10.10.10",
+ "replication_policy": null,
+ "role": "FileInterfaceRoleEnum.PRODUCTION",
+ "source_parameters": null,
+ "v6_prefix_length": null,
+ "vlan_id": 324
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+    import utils
+import ipaddress
+from ipaddress import ip_network
+
+LOG = utils.get_logger('interface')
+
+# Application identifier passed to the Unisphere connection for this module.
+application_type = "Ansible/1.6.0"
+
+
+class Interface(object):
+ """Class with Interface operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_interface_parameters())
+
+ mutually_exclusive = [['nas_server_name', 'nas_server_id'], ['ethernet_port_id', 'ethernet_port_name'], ['netmask', 'prefix_length']]
+ required_one_of = [['nas_server_name', 'nas_server_id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of
+ )
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+ LOG.info('Check Mode Flag %s', self.module.check_mode)
+
+ def get_interface_details(self, nas_server_obj):
+ """Get interface details.
+ :param: nas_server_obj: NAS server object.
+ :return: Returns interface details configured on NAS server.
+ """
+
+ try:
+ nas_server_obj_properties = nas_server_obj._get_properties()
+ if nas_server_obj_properties['file_interface']:
+ for item in nas_server_obj_properties['file_interface']['UnityFileInterfaceList']:
+ interface_id = self.unity_conn.get_file_interface(_id=item['UnityFileInterface']['id'])
+ if interface_id.ip_address == self.module.params['interface_ip']:
+ return interface_id
+ return None
+ except Exception as e:
+ error_msg = "Getting Interface details failed" \
+ " with error %s" % (str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_nas_server_obj(self, nas_server_name, nas_server_id):
+ """Get NAS server ID.
+ :param: nas_server_name: The name of NAS server
+ :param: nas_server_id: ID of NAS server
+ :return: Return NAS server object if exists
+ """
+
+ LOG.info("Getting NAS server object")
+ try:
+ if nas_server_name:
+ obj_nas = self.unity_conn.get_nas_server(name=nas_server_name)
+ return obj_nas
+ elif nas_server_id:
+ obj_nas = self.unity_conn.get_nas_server(_id=nas_server_id)
+ if obj_nas._get_properties()['existed']:
+ return obj_nas
+ else:
+ msg = "NAS server with id %s does not exist" % (nas_server_id)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to get details of NAS server with error: %s" % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def add_interface(self, nas_server_obj, ethernet_port_id=None, ethernet_port_name=None, role=None, interface_ip=None,
+ netmask=None, prefix_length=None, gateway=None, vlan_id=None):
+ """Adding interface to NAS server.
+ :param: nas_server_obj: The NAS server object.
+ :param: ethernet_port_id: ID of ethernet port.
+ :param: ethernet_port_name: Name of ethernet port.
+ :param: role: Role of the interface.
+ :param: interface_ip: IP of interface.
+ :param: netmask: Netmask for interface.
+ :param: prefix_length: Prefix length.
+ :param: gateway: Gateway for interface.
+ :param: vlan_id: vlan_id for interface.
+ :return: Return True if interface is configured successfully.
+ """
+
+ LOG.info("Adding interface to NAS Server")
+ try:
+ nas_server_obj_properties = nas_server_obj._get_properties()
+ if nas_server_obj_properties['file_interface']:
+ for item in nas_server_obj_properties['file_interface']['UnityFileInterfaceList']:
+ interface_id = self.unity_conn.get_file_interface(_id=item['UnityFileInterface']['id'])
+ if interface_id._get_properties()['ip_address'] == self.module.params['interface_ip']:
+ return False
+ if role:
+ role_value = get_role_enum(role)
+ if ethernet_port_name:
+ ethernet_port_info = self.unity_conn.get_ethernet_port(name=ethernet_port_name)
+ ethernet_port_id = ethernet_port_info.id
+ if not self.module.check_mode:
+ utils.UnityFileInterface.create(cli=self.unity_conn._cli, nas_server=nas_server_obj.get_id(), ip_port=ethernet_port_id,
+ role=role_value, ip=interface_ip, netmask=netmask, v6_prefix_length=prefix_length,
+ gateway=gateway, vlan_id=vlan_id)
+ return True
+ except Exception as e:
+ msg = "Failed to add interface to NAS Server with error: %s" % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def is_modification_required(self, interface_details):
+ """Check if modification is required in existing interface/s configured for NAS Server
+ :param: interface_details: Existing interface details
+ :return: True if modification is required
+ """
+ key_list = ['vlan_id', 'gateway', 'netmask']
+ for item in key_list:
+ if self.module.params[item] and self.module.params[item] != interface_details[item]:
+ return True
+ return False
+
+ def delete_interface(self, interface_obj):
+ """Delete NFS server.
+ :param: interface_obj: Interface object.
+ :return: Return True if interface is deleted.
+ """
+
+ LOG.info("Deleting interface")
+ try:
+ if not self.module.check_mode:
+ interface_obj.delete()
+ return True
+ except Exception as e:
+ msg = "Failed to delete interface with error: %s" % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def validate_input_params(self):
+ """Validates input parameters"""
+ param_list = ["nas_server_id", "nas_server_name",
+ "ethernet_port_name", "ethernet_port_id", "role",
+ "interface_ip", "netmask", "gateway"]
+
+ for param in param_list:
+ msg = "Please provide valid value for: %s" % param
+ if self.module.params[param] is not None and \
+ len(self.module.params[param].strip()) == 0:
+ errmsg = msg.format(param)
+ self.module.fail_json(msg=errmsg)
+
+ if self.module.params['vlan_id'] is not None and \
+ (self.module.params['vlan_id'] <= 3 or
+ self.module.params['vlan_id'] >= 4094):
+ self.module.fail_json(msg='vlan_id should be in the '
+ 'range of 3 to 4094')
+
+ if self.module.params['interface_ip'] and \
+ not is_valid_ip(self.module.params['interface_ip']):
+ self.module.fail_json(msg='The value for interface ip is invalid')
+
+ if self.module.params['gateway'] and \
+ not is_valid_ip(self.module.params['gateway']):
+ self.module.fail_json(msg='The value for gateway is invalid')
+
+ if self.module.params['netmask'] and not \
+ utils.is_valid_netmask(self.module.params['netmask']):
+ self.module.fail_json(msg='Invalid IPV4 address specified '
+ 'for netmask')
+
+ if self.module.params['interface_ip'] and \
+ (get_ip_version(self.module.params['interface_ip']) == 6):
+ self.module.fail_json(msg='IPv6 format is not supported')
+
+ def validate_create_params(self):
+ """Validates input parameters for adding interface"""
+ if self.module.params['role'] is None:
+ self.module.fail_json(msg='Role is a mandatory parameter'
+ ' for adding interface to NAS Server.')
+ if self.module.params['ethernet_port_name'] is None and \
+ self.module.params['ethernet_port_id'] is None:
+ self.module.fail_json(msg='ethernet_port_name/ethernet_port_id '
+ 'is mandatory parameter for adding '
+ 'interface to NAS Server.')
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on Interface module based on parameters
+ passed in the playbook
+ """
+ nas_server_id = self.module.params['nas_server_id']
+ nas_server_name = self.module.params['nas_server_name']
+ ethernet_port_name = self.module.params['ethernet_port_name']
+ ethernet_port_id = self.module.params['ethernet_port_id']
+ role = self.module.params['role']
+ interface_ip = self.module.params['interface_ip']
+ netmask = self.module.params['netmask']
+ prefix_length = self.module.params['prefix_length']
+ gateway = self.module.params['gateway']
+ vlan_id = self.module.params['vlan_id']
+ state = self.module.params['state']
+
+ # result is a dictionary that contains changed status and Interface details
+ result = dict(
+ changed=False,
+ interface_details={}
+ )
+ modify_flag = False
+
+ self.validate_input_params()
+
+ interface_details = None
+
+ nas_server_obj = self.get_nas_server_obj(nas_server_name, nas_server_id)
+
+ interface_obj = self.get_interface_details(nas_server_obj)
+
+ if interface_obj and state == 'present':
+ interface_details = interface_obj._get_properties()
+ modify_flag = self.is_modification_required(interface_details)
+ if modify_flag:
+ self.module.fail_json(msg="Modification of Interfaces for NAS server is not supported through Ansible module")
+
+ if not interface_obj and state == 'present':
+ self.validate_create_params()
+
+ result['changed'] = self.add_interface(nas_server_obj, ethernet_port_id, ethernet_port_name, role,
+ interface_ip, netmask, prefix_length, gateway, vlan_id)
+
+ if interface_obj and state == 'absent':
+ result['changed'] = self.delete_interface(interface_obj)
+
+ if result['changed']:
+ nas_server_obj = self.get_nas_server_obj(nas_server_name, nas_server_id)
+ interface_obj = self.get_interface_details(nas_server_obj)
+ if interface_obj:
+ interface_details = interface_obj._get_properties()
+
+ result['interface_details'] = interface_details
+
+ self.module.exit_json(**result)
+
+
+def get_interface_parameters():
+ """This method provide parameters required for the ansible
+ Interface module on Unity"""
+ return dict(
+ nas_server_id=dict(type='str'),
+ nas_server_name=dict(type='str'),
+ ethernet_port_name=dict(type='str'),
+ ethernet_port_id=dict(type='str'),
+ role=dict(type='str', choices=['PRODUCTION', 'BACKUP']),
+ interface_ip=dict(required=True, type='str'),
+ netmask=dict(type='str'),
+ prefix_length=dict(type='int'),
+ gateway=dict(type='str'),
+ vlan_id=dict(type='int'),
+ state=dict(required=True, type='str', choices=['present', 'absent'])
+ )
+
+
+def get_role_enum(role):
+ """Getting correct enum values for role
+ :param: role: Indicates role of interface.
+ :return: enum value for role.
+ """
+ if utils.FileInterfaceRoleEnum[role]:
+ role = utils.FileInterfaceRoleEnum[role]
+ return role
+
+
+def is_valid_ip(address):
+ """Validating IP address format
+ :param: address: IP address to be validated for format.
+ """
+ try:
+ ipaddress.ip_address(address)
+ return True
+ except ValueError:
+ return False
+
+
+def get_ip_version(val):
+ """Returns IP address version
+ :param: val: IP address to be validated for version.
+ """
+ try:
+ val = u'{0}'.format(val)
+ ip = ip_network(val, strict=False)
+ return ip.version
+ except ValueError:
+ return 0
+
+
+def main():
+ """Create Unity Interface object and perform action on it
+ based on user input from playbook"""
+ obj = Interface()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/nasserver.py b/ansible_collections/dellemc/unity/plugins/modules/nasserver.py
new file mode 100644
index 000000000..713125cc2
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/nasserver.py
@@ -0,0 +1,1142 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nasserver
+version_added: '1.1.0'
+short_description: Manage NAS servers on Unity storage system
+extends_documentation_fragment:
+- dellemc.unity.unity
+author:
+- P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com>
+description:
+- Managing NAS servers on Unity storage system includes get,
+ modification to the NAS servers.
+options:
+ nas_server_id:
+ description:
+ - The ID of the NAS server.
+ - Either I(nas_server_name) or I(nas_server_id) is required to perform the task.
+ - The parameters I(nas_server_name) and I(nas_server_id) are mutually exclusive.
+ type: str
+ nas_server_name:
+ description:
+ - The Name of the NAS server.
+ - Either I(nas_server_name) or I(nas_server_id) is required to perform the task.
+ - The parameters I(nas_server_name) and I(nas_server_id) are mutually exclusive.
+ type: str
+ nas_server_new_name:
+ description:
+ - The new name of the NAS server.
+ - It can be mentioned during modification of the NAS server.
+ type: str
+ is_replication_destination:
+ description:
+ - It specifies whether the NAS server is a replication destination.
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ is_backup_only:
+ description:
+ - It specifies whether the NAS server is used as backup only.
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ is_multiprotocol_enabled:
+ description:
+ - This parameter indicates whether multiprotocol sharing mode is enabled.
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ allow_unmapped_user:
+ description:
+ - This flag is used to mandatorily disable access in case of any user
+ mapping failure.
+ - If C(true), then enable access in case of any user mapping failure.
+ - If C(false), then disable access in case of any user mapping failure.
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ default_windows_user:
+ description:
+ - Default windows user name used for granting access in the case of Unix
+ to Windows user mapping failure.
+ - It can be mentioned during modification of the NAS server.
+ type: str
+ default_unix_user:
+ description:
+ - Default Unix user name used for granting access in the case of Windows
+ to Unix user mapping failure.
+ - It can be mentioned during modification of the NAS server.
+ type: str
+ enable_windows_to_unix_username_mapping:
+ description:
+ - This parameter indicates whether a Unix to/from Windows user name
+ mapping is enabled.
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ is_packet_reflect_enabled:
+ description:
+ - If the packet has to be reflected, then this parameter
+ has to be set to C(true).
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ current_unix_directory_service:
+ description:
+ - This is the directory service used for querying identity information
+ for UNIX (such as UIDs, GIDs, net groups).
+ - It can be mentioned during modification of the NAS server.
+ type: str
+ choices: ["NONE", "NIS", "LOCAL", "LDAP", "LOCAL_THEN_NIS", "LOCAL_THEN_LDAP"]
+ replication_params:
+ description:
+ - Settings required for enabling replication.
+ type: dict
+ suboptions:
+ destination_nas_server_name:
+ description:
+ - Name of the destination nas server.
+ - Default value will be source nas server name prefixed by 'DR_'.
+ type: str
+ replication_mode:
+ description:
+ - The replication mode.
+ - This is mandatory to enable replication.
+ type: str
+ choices: ['asynchronous', 'manual']
+ rpo:
+ description:
+ - Maximum time to wait before the system syncs the source and destination LUNs.
+ - The I(rpo) option should be specified if the I(replication_mode) is C(asynchronous).
+ - The value should be in range of C(5) to C(1440).
+ type: int
+ replication_type:
+ description:
+ - Type of replication.
+ choices: ['local', 'remote']
+ type: str
+ remote_system:
+ description:
+ - Details of remote system to which the replication is being configured.
+ - The I(remote_system) option should be specified if the
+ I(replication_type) is C(remote).
+ type: dict
+ suboptions:
+ remote_system_host:
+ required: true
+ description:
+ - IP or FQDN for remote Unity unisphere Host.
+ type: str
+ remote_system_username:
+ type: str
+ required: true
+ description:
+ - User name of remote Unity unisphere Host.
+ remote_system_password:
+ type: str
+ required: true
+ description:
+ - Password of remote Unity unisphere Host.
+ remote_system_verifycert:
+ type: bool
+ default: true
+ description:
+ - Boolean variable to specify whether or not to validate SSL
+ certificate of remote Unity unisphere Host.
+ - C(true) - Indicates that the SSL certificate should be verified.
+ - C(false) - Indicates that the SSL certificate should not be
+ verified.
+ remote_system_port:
+ description:
+ - Port at which remote Unity unisphere is hosted.
+ type: int
+ default: 443
+ destination_pool_name:
+ description:
+ - Name of pool to allocate destination Luns.
+ - Mutually exclusive with I(destination_pool_id).
+ type: str
+ destination_pool_id:
+ description:
+ - Id of pool to allocate destination Luns.
+ - Mutually exclusive with I(destination_pool_name).
+ type: str
+ destination_sp:
+ description:
+      - Storage processor of the destination nas server.
+ choices: ['SPA', 'SPB']
+ type: str
+ is_backup:
+ description:
+ - Indicates if the destination nas server is backup.
+ type: bool
+ replication_name:
+ description:
+ - User defined name for replication session.
+ type: str
+ new_replication_name:
+ description:
+ - Replication name to rename the session to.
+ type: str
+ replication_state:
+ description:
+ - State of the replication.
+ choices: ['enable', 'disable']
+ type: str
+ replication_reuse_resource:
+ description:
+ - This parameter indicates if existing NAS Server is to be used for replication.
+ type: bool
+ state:
+ description:
+ - Define the state of NAS server on the array.
+ - The value present indicates that NAS server should exist on the system after
+ the task is executed.
+ - In this release deletion of NAS server is not supported. Hence, if state is
+ set to C(absent) for any existing NAS server then error will be thrown.
+ - For any non-existing NAS server, if state is set to C(absent) then it will return None.
+ type: str
+ required: true
+ choices: ['present', 'absent']
+
+notes:
+- The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+
+ - name: Get Details of NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "{{nas_server_name}}"
+ state: "present"
+
+ - name: Modify Details of NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "{{nas_server_name}}"
+ nas_server_new_name: "updated_sample_nas_server"
+ is_replication_destination: False
+ is_backup_only: False
+ is_multiprotocol_enabled: True
+ allow_unmapped_user: True
+ default_unix_user: "default_unix_sample_user"
+ default_windows_user: "default_windows_sample_user"
+ enable_windows_to_unix_username_mapping: True
+ current_unix_directory_service: "LDAP"
+ is_packet_reflect_enabled: True
+ state: "present"
+
+ - name: Enable replication for NAS Server on Local System
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_id: "nas_10"
+ replication_reuse_resource: False
+ replication_params:
+ replication_name: "test_replication"
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "local"
+ destination_pool_name: "Pool_Ansible_Neo_DND"
+ destination_sp: "SPA"
+ is_backup: True
+ replication_state: "enable"
+ state: "present"
+
+ - name: Enable replication for NAS Server on Remote System
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_reuse_resource: False
+ replication_params:
+ replication_name: "test_replication"
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ remote_system:
+ remote_system_host: '10.10.10.10'
+ remote_system_verifycert: False
+ remote_system_username: 'test1'
+ remote_system_password: 'test1!'
+ destination_pool_name: "fastVP_pool"
+ destination_sp: "SPA"
+ is_backup: True
+ replication_state: "enable"
+ state: "present"
+
+ - name: Enable replication for NAS Server on Remote System in existing NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_reuse_resource: True
+ replication_params:
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ replication_name: "test_replication"
+ remote_system:
+ remote_system_host: '10.10.10.10'
+ remote_system_verifycert: False
+ remote_system_username: 'test1'
+ remote_system_password: 'test1!'
+ destination_pool_name: "fastVP_pool"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Modify replication on the nasserver
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_params:
+ replication_name: "test_repl"
+ new_replication_name: "test_repl_updated"
+ replication_mode: "asynchronous"
+ rpo: 50
+ replication_state: "enable"
+ state: "present"
+
+ - name: Disable replication on the nasserver
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_state: "disable"
+ state: "present"
+
+ - name: Disable replication by specifying replication_name on the nasserver
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_params:
+ replication_name: "test_replication"
+ replication_state: "disable"
+ state: "present"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+nas_server_details:
+ description: The NAS server details.
+ type: dict
+ returned: When NAS server exists.
+ contains:
+ name:
+ description: Name of the NAS server.
+ type: str
+ id:
+ description: ID of the NAS server.
+ type: str
+ allow_unmapped_user:
+ description: Enable/disable access status in case of any user
+ mapping failure.
+ type: bool
+ current_unix_directory_service:
+ description: Directory service used for querying identity
+ information for UNIX (such as UIDs, GIDs, net groups).
+ type: str
+ default_unix_user:
+ description: Default Unix user name used for granting access
+ in the case of Windows to Unix user mapping failure.
+ type: str
+ default_windows_user:
+ description: Default windows user name used for granting
+ access in the case of Unix to Windows user mapping
+ failure.
+ type: str
+ is_backup_only:
+ description: Whether the NAS server is used as backup only.
+ type: bool
+ is_multi_protocol_enabled:
+ description: Indicates whether multiprotocol sharing mode is
+ enabled.
+ type: bool
+ is_packet_reflect_enabled:
+ description: If the packet reflect has to be enabled.
+ type: bool
+ is_replication_destination:
+ description: If the NAS server is a replication destination
+ then True.
+ type: bool
+ is_windows_to_unix_username_mapping_enabled:
+ description: Indicates whether a Unix to/from Windows user name
+ mapping is enabled.
+ type: bool
+ sample: {
+ "allow_unmapped_user": null,
+ "cifs_server": {
+ "UnityCifsServerList": [
+ {
+ "UnityCifsServer": {
+ "hash": 8761756885270,
+ "id": "cifs_34"
+ }
+ }
+ ]
+ },
+ "current_sp": {
+ "UnityStorageProcessor": {
+ "hash": 8761756885273,
+ "id": "spb"
+ }
+ },
+ "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NIS",
+ "default_unix_user": null,
+ "default_windows_user": null,
+ "existed": true,
+ "file_dns_server": {
+ "UnityFileDnsServer": {
+ "hash": 8761756885441,
+ "id": "dns_12"
+ }
+ },
+ "file_interface": {
+ "UnityFileInterfaceList": [
+ {
+ "UnityFileInterface": {
+ "hash": 8761756889908,
+ "id": "if_37"
+ }
+ }
+ ]
+ },
+ "filesystems": null,
+ "hash": 8761757005084,
+ "health": {
+ "UnityHealth": {
+ "hash": 8761756867588
+ }
+ },
+ "home_sp": {
+ "UnityStorageProcessor": {
+ "hash": 8761756867618,
+ "id": "spb"
+ }
+ },
+ "id": "nas_10",
+ "is_backup_only": false,
+ "is_multi_protocol_enabled": false,
+ "is_packet_reflect_enabled": false,
+ "is_replication_destination": false,
+ "is_replication_enabled": true,
+ "is_windows_to_unix_username_mapping_enabled": null,
+ "name": "dummy_nas",
+ "pool": {
+ "UnityPool": {
+ "hash": 8761756885360,
+ "id": "pool_7"
+ }
+ },
+ "preferred_interface_settings": {
+ "UnityPreferredInterfaceSettings": {
+ "hash": 8761756885438,
+ "id": "preferred_if_10"
+ }
+ },
+ "replication_type": "ReplicationTypeEnum.REMOTE",
+ "size_allocated": 3489660928,
+ "tenant": null,
+ "virus_checker": {
+ "UnityVirusChecker": {
+ "hash": 8761756885426,
+ "id": "cava_10"
+ }
+ }
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+LOG = utils.get_logger('nasserver')
+
+application_type = "Ansible/1.6.0"
+
+
+class NASServer(object):
+ """Class with NAS Server operations"""
+
    def __init__(self):
        """ Define all parameters required by this module"""
        # Common Unisphere connection options plus the NAS-server specific
        # ones (get_nasserver_parameters is defined elsewhere in this file).
        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_nasserver_parameters())

        # initialize the ansible module
        mut_ex_args = [['nas_server_name', 'nas_server_id']]
        required_one_of = [['nas_server_name', 'nas_server_id']]

        self.module = AnsibleModule(
            argument_spec=self.module_params,
            supports_check_mode=False,
            mutually_exclusive=mut_ex_args,
            required_one_of=required_one_of
        )
        # Fail early when the required SDK libraries are missing; must run
        # before the connection attempt below.
        utils.ensure_required_libs(self.module)

        # result is a dictionary that contains changed status and
        # nas server details
        self.result = {"changed": False,
                       'nas_server_details': {}}

        # Side effect: opens a Unisphere connection to the array.
        self.unity_conn = utils.get_unity_unisphere_connection(
            self.module.params, application_type)
        self.nas_server_conn_obj = utils.nas_server.UnityNasServer(
            self.unity_conn)
        LOG.info('Connection established with the Unity Array')
+
+ def get_current_uds_enum(self, current_uds):
+ """
+ Get the enum of the Offline Availability parameter.
+ :param current_uds: Current Unix Directory Service string
+ :return: current_uds enum
+ """
+ if current_uds in \
+ utils.NasServerUnixDirectoryServiceEnum.__members__:
+ return utils.NasServerUnixDirectoryServiceEnum[current_uds]
+ else:
+ error_msg = "Invalid value {0} for Current Unix Directory" \
+ " Service provided".format(current_uds)
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_nas_server(self, nas_server_name, nas_server_id):
+ """
+ Get the NAS Server Object using NAME/ID of the NAS Server.
+ :param nas_server_name: Name of the NAS Server
+ :param nas_server_id: ID of the NAS Server
+ :return: NAS Server object.
+ """
+ nas_server = nas_server_name if nas_server_name else nas_server_id
+ try:
+ obj_nas = self.unity_conn.get_nas_server(_id=nas_server_id,
+ name=nas_server_name)
+ if nas_server_id and obj_nas and not obj_nas.existed:
+ # if obj_nas is not None and existed is observed as False,
+ # then None will be returned.
+ LOG.error("NAS Server object does not exist"
+ " with ID: %s ", nas_server_id)
+ return None
+ return obj_nas
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ cred_err = "Incorrect username or password , {0}".format(
+ e.message)
+ self.module.fail_json(msg=cred_err)
+ else:
+ err_msg = "Failed to get details of NAS Server" \
+ " {0} with error {1}".format(nas_server, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ except utils.UnityResourceNotFoundError as e:
+ err_msg = "Failed to get details of NAS Server" \
+ " {0} with error {1}".format(nas_server, str(e))
+ LOG.error(err_msg)
+ return None
+
+ except Exception as e:
+ nas_server = nas_server_name if nas_server_name \
+ else nas_server_id
+ err_msg = "Failed to get nas server details {0} with" \
+ " error {1}".format(nas_server, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ def to_update(self, nas_server_obj, current_uds):
+ LOG.info("Checking Whether the parameters are modified or not.")
+
+ # Checking all parameters individually because the nas obj return
+ # names are different compared to ansible parameter names.
+
+ # Current Unix Directory Service
+ if current_uds is not None and \
+ current_uds != nas_server_obj.current_unix_directory_service:
+ return True
+
+ # Rename NAS Server
+ if self.module.params['nas_server_new_name'] is not None and \
+ self.module.params['nas_server_new_name'] != \
+ nas_server_obj.name:
+ return True
+
+ # Is Replication Destination
+ if self.module.params["is_replication_destination"] is not None and \
+ (nas_server_obj.is_replication_destination is None or
+ self.module.params["is_replication_destination"] !=
+ nas_server_obj.is_replication_destination):
+ return True
+
+ # Is Multiprotocol Enabled
+ if self.module.params["is_multiprotocol_enabled"] is not None and \
+ (nas_server_obj.is_multi_protocol_enabled is None or
+ self.module.params["is_multiprotocol_enabled"] !=
+ nas_server_obj.is_multi_protocol_enabled):
+ return True
+
+ # Is Back Up Enabled
+ if self.module.params["is_backup_only"] is not None and \
+ (nas_server_obj.is_backup_only is None or
+ self.module.params["is_backup_only"] !=
+ nas_server_obj.is_backup_only):
+ return True
+
+ # Is Packet Reflect Enabled
+ if self.module.params["is_packet_reflect_enabled"] is not None and \
+ (nas_server_obj.is_packet_reflect_enabled is None or
+ self.module.params["is_packet_reflect_enabled"] !=
+ nas_server_obj.is_packet_reflect_enabled):
+ return True
+
+ # Allow Unmapped User
+ if self.module.params["allow_unmapped_user"] is not None and \
+ (nas_server_obj.allow_unmapped_user is None or
+ self.module.params["allow_unmapped_user"] !=
+ nas_server_obj.allow_unmapped_user):
+ return True
+
+ # Enable Windows To Unix User Mapping Flag
+ nas_win_flag = \
+ nas_server_obj.is_windows_to_unix_username_mapping_enabled
+ input_win_flag = \
+ self.module.params["enable_windows_to_unix_username_mapping"]
+ if input_win_flag is not None and \
+ (nas_win_flag is None or nas_win_flag != input_win_flag):
+ return True
+
+ # Default Windows User
+ if self.module.params["default_windows_user"] is not None and \
+ (nas_server_obj.default_windows_user is None or
+ self.module.params["default_windows_user"] !=
+ nas_server_obj.default_windows_user):
+ return True
+
+ # Default Unix User
+ if self.module.params["default_unix_user"] is not None and \
+ (nas_server_obj.default_unix_user is None or
+ self.module.params["default_unix_user"] !=
+ nas_server_obj.default_unix_user):
+ return True
+
+ return False
+
+ def update_nas_server(self, nas_server_obj, new_name=None,
+ default_unix_user=None, default_windows_user=None,
+ is_rep_dest=None, is_multiprotocol_enabled=None,
+ allow_unmapped_user=None, is_backup_only=None,
+ is_packet_reflect_enabled=None, current_uds=None,
+ enable_win_to_unix_name_map=None):
+ """
+ The Details of the NAS Server will be updated in the function.
+ """
+ try:
+ nas_server_obj.modify(
+ name=new_name,
+ is_replication_destination=is_rep_dest,
+ is_backup_only=is_backup_only,
+ is_multi_protocol_enabled=is_multiprotocol_enabled,
+ default_unix_user=default_unix_user,
+ default_windows_user=default_windows_user,
+ allow_unmapped_user=allow_unmapped_user,
+ is_packet_reflect_enabled=is_packet_reflect_enabled,
+ enable_windows_to_unix_username=enable_win_to_unix_name_map,
+ current_unix_directory_service=current_uds)
+
+ except Exception as e:
+ error_msg = "Failed to Update parameters of NAS Server" \
+ " %s with error %s" % (nas_server_obj.name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def modify_replication_session(self, nas_server_obj, repl_session, replication_params):
+ """ Modify the replication session
+ :param: nas_server_obj: NAS server object
+ :param: repl_session: Replication session to be modified
+ :param: replication_params: Module input params
+ :return: True if modification is successful
+ """
+ try:
+ LOG.info("Modifying replication session of nas server %s", nas_server_obj.name)
+ modify_payload = {}
+ if replication_params['replication_mode'] and \
+ replication_params['replication_mode'] == 'manual':
+ rpo = -1
+ elif replication_params['rpo']:
+ rpo = replication_params['rpo']
+ name = repl_session.name
+ if replication_params['new_replication_name'] and \
+ name != replication_params['new_replication_name']:
+ name = replication_params['new_replication_name']
+
+ if repl_session.name != name:
+ modify_payload['name'] = name
+ if ((replication_params['replication_mode'] or replication_params['rpo']) and
+ repl_session.max_time_out_of_sync != rpo):
+ modify_payload['max_time_out_of_sync'] = rpo
+
+ if modify_payload:
+ repl_session.modify(**modify_payload)
+ return True
+
+ return False
+ except Exception as e:
+ errormsg = "Modifying replication session failed with error %s", e
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def enable_replication(self, nas_server_obj, replication, replication_reuse_resource):
+ """ Enable replication on NAS Server
+ :param: nas_server_obj: NAS Server object.
+ :param: replication: Dict which has all the replication parameter values.
+ :return: True if replication is enabled else False.
+ """
+ try:
+ # Validate replication params
+ self.validate_nas_server_replication_params(replication)
+ self.update_replication_params(replication, replication_reuse_resource)
+
+ repl_session = \
+ self.get_replication_session_on_filter(nas_server_obj, replication, "modify")
+ if repl_session:
+ return self.modify_replication_session(nas_server_obj, repl_session, replication)
+
+ self.validate_create_replication_params(replication)
+ replication_args_list = get_replication_args_list(replication)
+
+ # Get remote system
+ if 'replication_type' in replication and replication['replication_type'] == 'remote':
+ self.get_remote_system(replication, replication_args_list)
+
+ # Form parameters when replication_reuse_resource is False
+ if not replication_reuse_resource:
+ update_replication_arg_list(replication, replication_args_list, nas_server_obj)
+ nas_server_obj.replicate_with_dst_resource_provisioning(**replication_args_list)
+ else:
+ replication_args_list['dst_nas_server_id'] = replication['destination_nas_server_id']
+ nas_server_obj.replicate(**replication_args_list)
+ return True
+
+ if 'replication_type' in replication and replication['replication_type'] == 'local':
+ update_replication_arg_list(replication, replication_args_list, nas_server_obj)
+ nas_server_obj.replicate_with_dst_resource_provisioning(**replication_args_list)
+ return True
+
+ except Exception as e:
+ errormsg = "Enabling replication to the nas server %s failed " \
+ "with error %s" % (nas_server_obj.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def disable_replication(self, obj_nas, replication_params):
+ """ Remove replication from the nas server
+ :param: replication_params: Module input params
+ :param: obj_nas: NAS Server object
+ :return: True if disabling replication is successful
+ """
+ try:
+ LOG.info(("Disabling replication on the nas server %s", obj_nas.name))
+ if replication_params:
+ self.update_replication_params(replication_params, False)
+ repl_session = \
+ self.get_replication_session_on_filter(obj_nas, replication_params, "delete")
+ if repl_session:
+ repl_session.delete()
+ return True
+ return False
+ except Exception as e:
+ errormsg = "Disabling replication on the nas server %s failed " \
+ "with error %s" % (obj_nas.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def get_replication_session_on_filter(self, obj_nas, replication_params, action):
        """ Retrieves replication session on nas server
        :param: obj_nas: NAS server object
        :param: replication_params: Module input params (may be None, e.g.
                when disabling replication without replication_params)
        :param: action: Specifies action as modify or delete
        :return: Replication session based on filter, or None
        """
        # Prefer the most specific filter available: remote system, then
        # session name, then an unfiltered lookup keyed on the action.
        if replication_params and replication_params['remote_system']:
            repl_session = \
                self.get_replication_session(obj_nas, filter_key="remote_system_name",
                                             replication_params=replication_params)
        elif replication_params and replication_params['replication_name']:
            repl_session = \
                self.get_replication_session(obj_nas, filter_key="name",
                                             name=replication_params['replication_name'])
        else:
            repl_session = self.get_replication_session(obj_nas, action=action)
        # For local replication, discard a session whose remote system is
        # not this array itself.
        if repl_session and action and replication_params and \
                replication_params['replication_type'] == 'local' and \
                repl_session.remote_system.name != self.unity_conn.name:
            return None
        return repl_session
+
+ def get_replication_session(self, obj_nas, filter_key=None, replication_params=None, name=None, action=None):
+ """ Retrieves the replication sessions configured for the nas server
+ :param: obj_nas: NAS server object
+ :param: filter_key: Key to filter replication sessions
+ :param: replication_params: Module input params
+ :param: name: Replication session name
+ :param: action: Specifies modify or delete action on replication session
+ :return: Replication session details
+ """
+ try:
+ repl_session = self.unity_conn.get_replication_session(src_resource_id=obj_nas.id)
+ if not filter_key and repl_session:
+ if len(repl_session) > 1:
+ if action:
+ error_msg = 'There are multiple replication sessions for the nas server.'\
+ ' Please specify replication_name in replication_params to %s.' % action
+ self.module.fail_json(msg=error_msg)
+ return repl_session
+ return repl_session[0]
+ for session in repl_session:
+ if filter_key == 'remote_system_name' and \
+ session.remote_system.name == replication_params['remote_system_name']:
+ return session
+ if filter_key == 'name' and session.name == name:
+ return session
+ return None
+ except Exception as e:
+ errormsg = "Retrieving replication session on the nas server failed " \
+ "with error %s", str(e)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_remote_system(self, replication, replication_args_list):
+ remote_system_name = replication['remote_system_name']
+ remote_system_list = self.unity_conn.get_remote_system()
+ for remote_system in remote_system_list:
+ if remote_system.name == remote_system_name:
+ replication_args_list['remote_system'] = remote_system
+ break
+ if 'remote_system' not in replication_args_list.keys():
+ errormsg = "Remote system %s is not found" % (remote_system_name)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def update_replication_params(self, replication, replication_reuse_resource):
        """ Update replication dict with remote system information
        :param: replication: Dict which has all the replication parameter values
        :param: replication_reuse_resource: Whether an existing destination
                NAS server is reused (drives the NAS server id lookup)
        :return: Updated replication Dict (mutated in place)
        """
        try:
            if 'replication_type' in replication and replication['replication_type'] == 'remote':
                # Open a second Unisphere connection to the remote array so
                # that names (system, pool, NAS server) can be resolved to ids.
                connection_params = {
                    'unispherehost': replication['remote_system']['remote_system_host'],
                    'username': replication['remote_system']['remote_system_username'],
                    'password': replication['remote_system']['remote_system_password'],
                    'validate_certs': replication['remote_system']['remote_system_verifycert'],
                    'port': replication['remote_system']['remote_system_port']
                }
                remote_system_conn = utils.get_unity_unisphere_connection(
                    connection_params, application_type)
                replication['remote_system_name'] = remote_system_conn.name
                if replication['destination_pool_name'] is not None:
                    pool_object = remote_system_conn.get_pool(name=replication['destination_pool_name'])
                    replication['destination_pool_id'] = pool_object.id
                # The destination NAS server id is only needed when reusing
                # an existing NAS server on the remote array.
                if replication['destination_nas_server_name'] is not None and replication_reuse_resource:
                    nas_object = remote_system_conn.get_nas_server(name=replication['destination_nas_server_name'])
                    replication['destination_nas_server_id'] = nas_object.id
            else:
                # Local replication: the "remote" system is this array itself.
                replication['remote_system_name'] = self.unity_conn.name
                if replication['destination_pool_name'] is not None:
                    pool_object = self.unity_conn.get_pool(name=replication['destination_pool_name'])
                    replication['destination_pool_id'] = pool_object.id
        except Exception as e:
            errormsg = "Updating replication params failed with error %s" % str(e)
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
+ def validate_rpo(self, replication):
+ if 'replication_mode' in replication and replication['replication_mode'] == 'asynchronous' \
+ and replication['rpo'] is None:
+ errormsg = "rpo is required together with 'asynchronous' replication_mode."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ if (replication['rpo'] and (replication['rpo'] < 5 or replication['rpo'] > 1440)) \
+ and (replication['replication_mode'] and replication['replication_mode'] != 'manual' or
+ not replication['replication_mode'] and replication['rpo'] != -1):
+ errormsg = "rpo value should be in range of 5 to 1440"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_nas_server_replication_params(self, replication):
+ """ Validate NAS server replication params
+ :param: replication: Dict which has all the replication parameter values
+ """
+
+ # Valdiate replication
+ if replication is None:
+ errormsg = "Please specify replication_params to enable replication."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ else:
+ # validate destination pool info
+ if replication['destination_pool_id'] is not None and replication['destination_pool_name'] is not None:
+ errormsg = "'destination_pool_id' and 'destination_pool_name' is mutually exclusive."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ # Validate replication mode
+ self.validate_rpo(replication)
+ # Validate replication type
+ if replication['replication_type'] == 'remote' and replication['remote_system'] is None:
+ errormsg = "Remote_system is required together with 'remote' replication_type"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ # Validate destination NAS server name
+ if 'destination_nas_name' in replication and replication['destination_nas_name'] is not None:
+ dst_nas_server_name_length = len(replication['destination_nas_name'])
+ if dst_nas_server_name_length == 0 or dst_nas_server_name_length > 95:
+ errormsg = "destination_nas_name value should be in range of 1 to 95"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_create_replication_params(self, replication):
+ ''' Validate replication params '''
+ if replication['destination_pool_id'] is None and replication['destination_pool_name'] is None:
+ errormsg = "Either 'destination_pool_id' or 'destination_pool_name' is required."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ keys = ['replication_mode', 'replication_type']
+ for key in keys:
+ if replication[key] is None:
+ errormsg = "Please specify %s to enable replication." % key
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def perform_module_operation(self):
        """
        Perform different actions on NAS Server based on user parameters
        chosen in playbook
        """
        state = self.module.params['state']
        nas_server_name = self.module.params['nas_server_name']
        nas_server_id = self.module.params['nas_server_id']
        nas_server_new_name = self.module.params['nas_server_new_name']
        default_unix_user = self.module.params['default_unix_user']
        default_windows_user = self.module.params['default_windows_user']

        is_replication_destination = \
            self.module.params['is_replication_destination']
        is_multiprotocol_enabled = \
            self.module.params['is_multiprotocol_enabled']
        allow_unmapped_user = self.module.params['allow_unmapped_user']
        enable_windows_to_unix_username_mapping = \
            self.module.params['enable_windows_to_unix_username_mapping']

        is_backup_only = self.module.params['is_backup_only']
        is_packet_reflect_enabled = \
            self.module.params['is_packet_reflect_enabled']

        current_uds = self.module.params['current_unix_directory_service']
        replication = self.module.params['replication_params']
        replication_state = self.module.params['replication_state']
        replication_reuse_resource = self.module.params['replication_reuse_resource']
        # Get the enum for the corresponding offline_availability
        if current_uds:
            current_uds = \
                self.get_current_uds_enum(current_uds)

        changed = False

        # replication_params without replication_state is ambiguous: reject
        # before touching the array.
        if replication and replication_state is None:
            self.module.fail_json(msg="Please specify replication_state along with replication_params")

        '''
        Get details of NAS Server.
        '''
        nas_server_obj = None
        if nas_server_name or nas_server_id:
            nas_server_obj = self.get_nas_server(nas_server_name,
                                                 nas_server_id)

        # As creation is not supported and if NAS Server does not exist
        # along with state as present, then error will be thrown.
        if not nas_server_obj and state == "present":
            msg = "NAS Server Resource not found. Please enter a valid " \
                  "Name/ID to get or modify the parameters of nas server."
            LOG.error(msg)
            self.module.fail_json(msg=msg)

        '''
        Update the parameters of NAS Server
        '''
        # to_update() decides whether any modifiable attribute differs from
        # the requested values; only then is an update issued.
        if nas_server_obj and state == "present":
            update_flag = self.to_update(nas_server_obj, current_uds)
            if update_flag:
                self.update_nas_server(
                    nas_server_obj, nas_server_new_name, default_unix_user,
                    default_windows_user, is_replication_destination,
                    is_multiprotocol_enabled, allow_unmapped_user,
                    is_backup_only, is_packet_reflect_enabled,
                    current_uds, enable_windows_to_unix_username_mapping)
                changed = True

        # As deletion is not supported and if NAS Server exists along with
        # state as absent, then error will be thrown.
        if nas_server_obj and state == 'absent':
            self.module.fail_json(msg="Deletion of NAS Server is "
                                      "currently not supported.")

        # Enable or disable replication on the existing NAS server as asked.
        if state == 'present' and nas_server_obj and replication_state is not None:
            if replication_state == 'enable':
                changed = self.enable_replication(nas_server_obj, replication, replication_reuse_resource)
            else:
                changed = self.disable_replication(nas_server_obj, replication)

        '''
        Update the changed state and NAS Server details
        '''
        # Re-fetch by id so the returned details reflect any modifications.
        nas_server_details = None
        if nas_server_obj:
            nas_server_details = self.get_nas_server(
                None, nas_server_obj.id)._get_properties()

        self.result["changed"] = changed
        self.result["nas_server_details"] = nas_server_details
        self.module.exit_json(**self.result)
+
+
def get_nasserver_parameters():
    """
    This method provides parameters required for the ansible NAS Server
    modules on Unity
    """

    # Credentials/connection details of the remote Unity system used for
    # 'remote' replication.
    remote_system_spec = {
        'remote_system_host': {'type': 'str', 'required': True, 'no_log': True},
        'remote_system_verifycert': {'type': 'bool', 'required': False,
                                     'default': True},
        'remote_system_username': {'type': 'str', 'required': True},
        'remote_system_password': {'type': 'str', 'required': True, 'no_log': True},
        'remote_system_port': {'type': 'int', 'required': False, 'default': 443,
                               'no_log': True},
    }

    # Suboptions accepted under replication_params.
    replication_spec = {
        'destination_nas_server_name': {'type': 'str'},
        'replication_mode': {'type': 'str', 'choices': ['asynchronous', 'manual']},
        'rpo': {'type': 'int'},
        'replication_type': {'type': 'str', 'choices': ['local', 'remote']},
        'remote_system': {'type': 'dict', 'options': remote_system_spec},
        'destination_pool_name': {'type': 'str'},
        'destination_pool_id': {'type': 'str'},
        'destination_sp': {'type': 'str', 'choices': ['SPA', 'SPB']},
        'is_backup': {'type': 'bool'},
        'replication_name': {'type': 'str'},
        'new_replication_name': {'type': 'str'},
    }

    return {
        'nas_server_name': {}, 'nas_server_id': {},
        'nas_server_new_name': {},
        'default_unix_user': {},
        'default_windows_user': {},
        'current_unix_directory_service': {
            'choices': ["NIS", "LDAP", "LOCAL_THEN_NIS",
                        "LOCAL_THEN_LDAP", "NONE", "LOCAL"]},
        'is_replication_destination': {'type': 'bool'},
        'is_backup_only': {'type': 'bool'},
        'is_multiprotocol_enabled': {'type': 'bool'},
        'allow_unmapped_user': {'type': 'bool'},
        'enable_windows_to_unix_username_mapping': {'type': 'bool'},
        'is_packet_reflect_enabled': {'type': 'bool'},
        'replication_params': {'type': 'dict', 'options': replication_spec},
        'replication_reuse_resource': {'type': 'bool'},
        'replication_state': {'type': 'str', 'choices': ['enable', 'disable']},
        'state': {'required': True, 'choices': ['present', 'absent'],
                  'type': 'str'},
    }
+
+
def get_sp_enum(destination_sp):
    """Getting correct enum values for Storage Processor
    :param: destination_sp: Storage Processor to be used in Destination NAS Server.
    :return: enum value for Storage Processor.
    """
    # Look up once; enum members are truthy, so this mirrors the guarded
    # return of the original implementation.
    node_enum = utils.NodeEnum[destination_sp]
    if node_enum:
        return node_enum
+
+
def get_replication_args_list(replication_params):
    """Returns the replication args for payload"""
    args = {}

    # Carry the replication session name through only when one was given.
    name = replication_params['replication_name']
    if name:
        args['replication_name'] = name

    # Asynchronous replication syncs on the requested RPO; every other mode
    # (manual, or unspecified) is represented by -1.
    is_async = replication_params.get('replication_mode') == 'asynchronous'
    args['max_time_out_of_sync'] = \
        replication_params['rpo'] if is_async else -1

    return args
+
+
def update_replication_arg_list(replication, replication_args_list, nas_server_obj):
    """ Update replication arg list in place.
    :param: replication: Dict which has all the replication parameter values
    :param: replication_args_list: the existing list which should be updated
    :param: nas_server_obj: NAS Server object on which replication is to be enabled
    """
    # Optional destination storage processor.
    if replication.get('destination_sp'):
        replication_args_list['dst_sp'] = get_sp_enum(replication['destination_sp'])

    replication_args_list['dst_pool_id'] = replication['destination_pool_id']

    # Optional backup-only flag.
    if replication.get('is_backup'):
        replication_args_list['is_backup_only'] = replication['is_backup']

    if replication['replication_type'] == 'local':
        # Local replication: user-supplied name wins, otherwise "DR_<source>".
        given_name = replication.get('destination_nas_server_name')
        replication_args_list['dst_nas_server_name'] = \
            given_name if given_name is not None else "DR_" + nas_server_obj.name
    else:
        # Remote replication: default to the source server's own name only
        # when no destination name was supplied.
        if replication['destination_nas_server_name'] is None:
            replication_args_list['dst_nas_server_name'] = nas_server_obj.name
+
+
def main():
    """ Create Unity NAS Server object and perform action on it
    based on user input from playbook"""
    NASServer().perform_module_operation()
+
+
# Run the module entry point only when executed directly, not on import.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/nfs.py b/ansible_collections/dellemc/unity/plugins/modules/nfs.py
new file mode 100644
index 000000000..e6223066b
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/nfs.py
@@ -0,0 +1,1873 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing nfs export on Unity"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: nfs
+version_added: '1.1.0'
+short_description: Manage NFS export on Unity storage system
+description:
+- Managing NFS export on Unity storage system includes-
+ Create new NFS export,
+ Modify NFS export attributes,
+ Display NFS export details,
+ Delete NFS export.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Vivek Soni (@v-soni11) <ansible.team@dell.com>
+
+options:
+ nfs_export_name:
+ description:
+ - Name of the nfs export.
+ - Mandatory for create operation.
+ - Specify either I(nfs_export_name) or I(nfs_export_id) (but not both) for any
+ operation.
+ type: str
+ nfs_export_id:
+ description:
+ - ID of the nfs export.
+ - This is a unique ID generated by Unity storage system.
+ type: str
+ filesystem_name:
+ description:
+ - Name of the filesystem for which NFS export will be created.
+ - Either filesystem or snapshot is required for creation of the NFS.
+ - If I(filesystem_name) is specified, then I(nas_server) is required to uniquely
+ identify the filesystem.
+ - If filesystem parameter is provided, then snapshot cannot be specified.
+ type: str
+ filesystem_id:
+ description:
+ - ID of the filesystem.
+ - This is a unique ID generated by Unity storage system.
+ type: str
+ snapshot_name:
+ description:
+ - Name of the snapshot for which NFS export will be created.
+ - Either filesystem or snapshot is required for creation of the NFS
+ export.
+ - If snapshot parameter is provided, then filesystem cannot be specified.
+ type: str
+ snapshot_id:
+ description:
+ - ID of the snapshot.
+ - This is a unique ID generated by Unity storage system.
+ type: str
+ nas_server_name:
+ description:
+ - Name of the NAS server on which filesystem will be hosted.
+ type: str
+ nas_server_id:
+ description:
+ - ID of the NAS server on which filesystem will be hosted.
+ type: str
+ path:
+ description:
+ - Local path to export relative to the NAS server root.
+ - With NFS, each export of a file_system or file_snap must have a unique
+ local path.
+ - Mandatory while creating NFS export.
+ type: str
+ description:
+ description:
+ - Description of the NFS export.
+ - Optional parameter when creating a NFS export.
+ - To modify description, pass the new value in I(description) field.
+ - To remove description, pass the empty value in I(description) field.
+ type: str
+ host_state:
+ description:
+ - Define whether the hosts can access the NFS export.
+ - Required when adding or removing access of hosts from the export.
+ type: str
+ choices: ['present-in-export', 'absent-in-export']
+ anonymous_uid:
+ description:
+ - Specifies the user ID of the anonymous account.
+ - If not specified at the time of creation, it will be set to 4294967294.
+ type: int
+ anonymous_gid:
+ description:
+ - Specifies the group ID of the anonymous account.
+ - If not specified at the time of creation, it will be set to 4294967294.
+ type: int
+ state:
+ description:
+ - State variable to determine whether NFS export will exist or not.
+ required: true
+ type: str
+ choices: ['absent', 'present']
+ default_access:
+ description:
+ - Default access level for all hosts that can access the NFS export.
+ - For hosts that need different access than the default,
+ they can be configured by adding to the list.
+ - If I(default_access) is not mentioned during creation, then NFS export will
+ be created with C(NO_ACCESS).
+ type: str
+ choices: ['NO_ACCESS', 'READ_ONLY', 'READ_WRITE', 'ROOT',
+ 'READ_ONLY_ROOT']
+ min_security:
+ description:
+ - NFS enforced security type for users accessing a NFS export.
+ - If not specified at the time of creation, it will be set to C(SYS).
+ type: str
+ choices: ['SYS', 'KERBEROS', 'KERBEROS_WITH_INTEGRITY',
+ 'KERBEROS_WITH_ENCRYPTION']
+ adv_host_mgmt_enabled:
+ description:
+ - If C(false), allows you to specify hosts without first having to register them.
+ - Mandatory while adding access hosts.
+ type: bool
+ no_access_hosts:
+ description:
+ - Hosts with no access to the NFS export.
+ - List of dictionaries. Each dictionary will have any of the keys from
+ I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ ip_address:
+ description:
+ - IP address of the host.
+ type: str
+ subnet:
+ description:
+ - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+ type: str
+ netgroup:
+ description:
+ - Netgroup that is defined in NIS or the local netgroup file.
+ type: str
+ domain:
+ description:
+ - DNS domain, where all NFS clients in the domain are included in the host list.
+ type: str
+ read_only_hosts:
+ description:
+ - Hosts with read-only access to the NFS export.
+ - List of dictionaries. Each dictionary will have any of the keys from
+ I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ ip_address:
+ description:
+ - IP address of the host.
+ type: str
+ subnet:
+ description:
+ - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+ type: str
+ netgroup:
+ description:
+ - Netgroup that is defined in NIS or the local netgroup file.
+ type: str
+ domain:
+ description:
+ - DNS domain, where all NFS clients in the domain are included in the host list.
+ type: str
+ read_only_root_hosts:
+ description:
+ - Hosts with read-only for root user access to the NFS export.
+ - List of dictionaries. Each dictionary will have any of the keys from
+ I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ ip_address:
+ description:
+ - IP address of the host.
+ type: str
+ subnet:
+ description:
+ - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+ type: str
+ netgroup:
+ description:
+ - Netgroup that is defined in NIS or the local netgroup file.
+ type: str
+ domain:
+ description:
+ - DNS domain, where all NFS clients in the domain are included in the host list.
+ type: str
+ read_write_hosts:
+ description:
+ - Hosts with read and write access to the NFS export.
+ - List of dictionaries. Each dictionary will have any of the keys from
+ I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ ip_address:
+ description:
+ - IP address of the host.
+ type: str
+ subnet:
+ description:
+ - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+ type: str
+ netgroup:
+ description:
+ - Netgroup that is defined in NIS or the local netgroup file.
+ type: str
+ domain:
+ description:
+ - DNS domain, where all NFS clients in the domain are included in the host list.
+ type: str
+ read_write_root_hosts:
+ description:
+ - Hosts with read and write for root user access to the NFS export.
+ - List of dictionaries. Each dictionary will have any of the keys from
+ I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ ip_address:
+ description:
+ - IP address of the host.
+ type: str
+ subnet:
+ description:
+ - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+ type: str
+ netgroup:
+ description:
+ - Netgroup that is defined in NIS or the local netgroup file.
+ type: str
+ domain:
+ description:
+ - DNS domain, where all NFS clients in the domain are included in the host list.
+ type: str
+notes:
+- The I(check_mode) is not supported.
+"""
+
+EXAMPLES = r"""
+- name: Create nfs export from filesystem
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ path: '/'
+ filesystem_id: "fs_377"
+ state: "present"
+
+- name: Create nfs export from snapshot
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_snap"
+ path: '/'
+ snapshot_name: "ansible_fs_snap"
+ state: "present"
+
+- name: Modify nfs export
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ nas_server_id: "nas_3"
+ description: ""
+ default_access: "READ_ONLY_ROOT"
+ anonymous_gid: 4294967290
+ anonymous_uid: 4294967290
+ state: "present"
+
+- name: Add host in nfs export with adv_host_mgmt_enabled as true
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "Host_1"
+ read_only_hosts:
+ - host_id: "Host_2"
+ read_only_root_hosts:
+ - host_name: "host_name1"
+ read_write_hosts:
+ - host_name: "host_name2"
+ read_write_root_hosts:
+ - ip_address: "1.1.1.1"
+ host_state: "present-in-export"
+ state: "present"
+
+- name: Remove host in nfs export with adv_host_mgmt_enabled as true
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "Host_1"
+ read_only_hosts:
+ - host_id: "Host_2"
+ read_only_root_hosts:
+ - host_name: "host_name1"
+ read_write_hosts:
+ - host_name: "host_name2"
+ read_write_root_hosts:
+ - ip_address: "1.1.1.1"
+ host_state: "absent-in-export"
+ state: "present"
+
+- name: Add host in nfs export with adv_host_mgmt_enabled as false
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: false
+ no_access_hosts:
+ - domain: "google.com"
+ read_only_hosts:
+ - netgroup: "netgroup_admin"
+ read_only_root_hosts:
+ - host_name: "host5"
+ read_write_hosts:
+ - subnet: "168.159.57.4/255.255.255.0"
+ read_write_root_hosts:
+ - ip_address: "10.255.2.4"
+ host_state: "present-in-export"
+ state: "present"
+
+- name: Remove host in nfs export with adv_host_mgmt_enabled as false
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: false
+ no_access_hosts:
+ - domain: "google.com"
+ read_only_hosts:
+ - netgroup: "netgroup_admin"
+ read_only_root_hosts:
+ - host_name: "host5"
+ read_write_hosts:
+ - subnet: "168.159.57.4/255.255.255.0"
+ read_write_root_hosts:
+ - ip_address: "10.255.2.4"
+ host_state: "absent-in-export"
+ state: "present"
+
+- name: Get nfs details
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_id: "NFSShare_291"
+ state: "present"
+
+- name: Delete nfs export by nfs name
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_name"
+ nas_server_name: "ansible_nas_name"
+ state: "absent"
+"""
+
+RETURN = r"""
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: "false"
+
+nfs_share_details:
+ description: Details of the nfs export.
+ returned: When nfs export exists.
+ type: dict
+ contains:
+ anonymous_uid:
+ description: User ID of the anonymous account
+ type: int
+ anonymous_gid:
+ description: Group ID of the anonymous account
+ type: int
+ default_access:
+ description: Default access level for all hosts that can access export
+ type: str
+ description:
+ description: Description about the nfs export
+ type: str
+ id:
+ description: ID of the nfs export
+ type: str
+ min_security:
+ description: NFS enforced security type for users accessing an export
+ type: str
+ name:
+ description: Name of the nfs export
+ type: str
+ no_access_hosts_string:
+ description: Hosts with no access to the nfs export
+ type: str
+ read_only_hosts_string:
+ description: Hosts with read-only access to the nfs export
+ type: str
+ read_only_root_hosts_string:
+ description: Hosts with read-only for root user access to the nfs export
+ type: str
+ read_write_hosts_string:
+ description: Hosts with read and write access to the nfs export
+ type: str
+ read_write_root_hosts_string:
+ description: Hosts with read and write for root user access to export
+ type: str
+ type:
+ description: NFS export type. i.e. filesystem or snapshot
+ type: str
+ export_paths:
+ description: Export paths that can be used to mount and access export
+ type: list
+ filesystem:
+ description: Details of the filesystem on which nfs export is present
+ type: dict
+ contains:
+ UnityFileSystem:
+ description: filesystem details
+ type: dict
+ contains:
+ id:
+ description: ID of the filesystem
+ type: str
+ name:
+ description: Name of the filesystem
+ type: str
+ nas_server:
+ description: Details of the nas server
+ type: dict
+ contains:
+ UnityNasServer:
+ description: NAS server details
+ type: dict
+ contains:
+ id:
+ description: ID of the nas server
+ type: str
+ name:
+ description: Name of the nas server
+ type: str
+ sample: {
+ 'anonymous_gid': 4294967294,
+ 'anonymous_uid': 4294967294,
+ 'creation_time': '2022-03-09 15:05:34.720000+00:00',
+ 'default_access': 'NFSShareDefaultAccessEnum.NO_ACCESS',
+ 'description': '',
+ 'export_option': 1,
+ 'export_paths': [
+ '**.***.**.**:/dummy-share-123'
+ ],
+ 'filesystem': {
+ 'UnityFileSystem': {
+ 'id': 'fs_id_1',
+ 'name': 'fs_name_1'
+ }
+ },
+ 'host_accesses': None,
+ 'id': 'NFSShare_14393',
+ 'is_read_only': None,
+ 'min_security': 'NFSShareSecurityEnum.SYS',
+ 'modification_time': '2022-04-25 08:12:28.179000+00:00',
+ 'name': 'dummy-share-123',
+ 'nfs_owner_username': None,
+ 'no_access_hosts': None,
+ 'no_access_hosts_string': 'host1,**.***.*.*',
+ 'path': '/',
+ 'read_only_hosts': None,
+ 'read_only_hosts_string': '',
+ 'read_only_root_access_hosts': None,
+ 'read_only_root_hosts_string': '',
+ 'read_write_hosts': None,
+ 'read_write_hosts_string': '',
+ 'read_write_root_hosts_string': '',
+ 'role': 'NFSShareRoleEnum.PRODUCTION',
+ 'root_access_hosts': None,
+ 'snap': None,
+ 'type': 'NFSTypeEnum.NFS_SHARE',
+ 'existed': True,
+ 'nas_server': {
+ 'UnityNasServer': {
+ 'id': 'nas_id_1',
+ 'name': 'dummy_nas_server'
+ }
+ }
+ }
+"""
+
+import re
+import traceback
+
+try:
+ from ipaddress import ip_network, IPv4Network, IPv6Network
+ HAS_IPADDRESS, IP_ADDRESS_IMP_ERR = True, None
+except ImportError:
+ HAS_IPADDRESS, IP_ADDRESS_IMP_ERR = False, traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
LOG = utils.get_logger('nfs')

# Accepted values for the 'default_access' module option.
DEFAULT_ACCESS_LIST = ['NO_ACCESS', 'READ_ONLY', 'READ_WRITE', 'ROOT',
                       'READ_ONLY_ROOT']
# Accepted values for the 'min_security' module option.
MIN_SECURITY_LIST = ['SYS', 'KERBEROS', 'KERBEROS_WITH_INTEGRITY',
                     'KERBEROS_WITH_ENCRYPTION']
# Shared argument-spec fragment reused by every host-access list option.
HOST_DICT = dict(type='list', required=False, elements='dict',
                 options=dict(host_name=dict(),
                              host_id=dict(),
                              ip_address=dict(),
                              subnet=dict(),
                              netgroup=dict(),
                              domain=dict()))
# Accepted values for 'host_state' and 'state' respectively.
HOST_STATE_LIST = ['present-in-export', 'absent-in-export']
STATE_LIST = ['present', 'absent']

# Client identification string sent to Unisphere with every request.
application_type = "Ansible/1.6.0"
+
+
+class NFS(object):
+ """Class with nfs export operations"""
+
    def __init__(self):
        """ Define all parameters required by this module"""

        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_nfs_parameters())

        # An export is addressed by id or by name (plus enough context to
        # resolve the name); filesystem/snapshot and the two nas server
        # identifiers are likewise alternatives, never combined.
        mutually_exclusive = [['nfs_export_id', 'nas_server_id'],
                              ['nfs_export_id', 'nas_server_name'],
                              ['filesystem_id', 'filesystem_name',
                               'snapshot_id', 'snapshot_name'],
                              ['nas_server_id', 'nas_server_name']]
        required_one_of = [['nfs_export_id', 'nfs_export_name']]

        """ initialize the ansible module """
        self.module = AnsibleModule(
            argument_spec=self.module_params, supports_check_mode=False,
            mutually_exclusive=mutually_exclusive,
            required_one_of=required_one_of)
        utils.ensure_required_libs(self.module)

        # The ipaddress package backs the host/subnet validation helpers.
        if not HAS_IPADDRESS:
            self.module.fail_json(msg=missing_required_lib("ipaddress"),
                                  exception=IP_ADDRESS_IMP_ERR)

        self.unity = utils.get_unity_unisphere_connection(self.module.params,
                                                          application_type)
        self.cli = self.unity._cli

        # True -> export backed by a filesystem, False -> by a snapshot,
        # None -> neither was specified by the user.
        self.is_given_nfs_for_fs = None
        if self.module.params['filesystem_name'] or \
                self.module.params['filesystem_id']:
            self.is_given_nfs_for_fs = True
        elif self.module.params['snapshot_name'] or \
                self.module.params['snapshot_id']:
            self.is_given_nfs_for_fs = False

        # Contain hosts input & output parameters: maps each module input
        # option to the corresponding '*_string' attribute on the share.
        self.host_param_mapping = {
            'no_access_hosts': 'no_access_hosts_string',
            'read_only_hosts': 'read_only_hosts_string',
            'read_only_root_hosts': 'read_only_root_hosts_string',
            'read_write_hosts': 'read_write_hosts_string',
            'read_write_root_hosts': 'read_write_root_hosts_string'
        }

        # Default_access mapping. keys are giving by user & values are
        # accepted by SDK
        self.default_access = {'READ_ONLY_ROOT': 'RO_ROOT'}

        LOG.info('Got the unity instance for provisioning on Unity')
+
+ def validate_host_access_data(self, host_dict):
+ """
+ Validate host access data
+ :param host_dict: Host access data
+ :return None
+ """
+ fqdn_pat = re.compile(r'(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{0,62}'
+ r'[a-zA-Z0-9]\.)+[a-zA-Z]{2,63}$)')
+
+ if host_dict.get('host_name'):
+ version = get_ip_version(host_dict.get('host_name'))
+ if version in (4, 6):
+ msg = "IP4/IP6: %s given in host_name instead " \
+ "of name" % host_dict.get('host_name')
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if host_dict.get('ip_address'):
+ ip_or_fqdn = host_dict.get('ip_address')
+ version = get_ip_version(ip_or_fqdn)
+ # validate its FQDN or not
+ if version == 0 and not fqdn_pat.match(ip_or_fqdn):
+ msg = "%s is not a valid FQDN" % ip_or_fqdn
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if host_dict.get('subnet'):
+ subnet = host_dict.get('subnet')
+ subnet_info = subnet.split("/")
+ if len(subnet_info) != 2:
+ msg = "Subnet should be in format 'IP address/netmask' or 'IP address/prefix length'"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def validate_adv_host_mgmt_enabled_check(self, host_dict):
+ """
+ Validate adv_host_mgmt_enabled check
+ :param host_dict: Host access data
+ :return None
+ """
+ host_dict_keys_set = set(host_dict.keys())
+ adv_host_mgmt_enabled_true_set = {'host_name', 'host_id', 'ip_address'}
+ adv_host_mgmt_enabled_false_set = {'host_name', 'subnet', 'domain', 'netgroup', 'ip_address'}
+ adv_host_mgmt_enabled_true_diff = host_dict_keys_set - adv_host_mgmt_enabled_true_set
+ adv_host_mgmt_enabled_false_diff = host_dict_keys_set - adv_host_mgmt_enabled_false_set
+ if self.module.params['adv_host_mgmt_enabled'] and adv_host_mgmt_enabled_true_diff != set():
+ msg = "If 'adv_host_mgmt_enabled' is true then host access should only have %s" % adv_host_mgmt_enabled_true_set
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ elif not self.module.params['adv_host_mgmt_enabled'] and adv_host_mgmt_enabled_false_diff != set():
+ msg = "If 'adv_host_mgmt_enabled' is false then host access should only have %s" % adv_host_mgmt_enabled_false_set
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def validate_host_access_input_params(self):
+ """
+ Validate host access params
+ :return None
+ """
+ for param in list(self.host_param_mapping.keys()):
+ if self.module.params[param] and (not self.module.params[
+ 'host_state'] or self.module.params[
+ 'adv_host_mgmt_enabled'] is None):
+ msg = "'host_state' and 'adv_host_mgmt_enabled' is required along with: %s" % param
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ elif self.module.params[param]:
+ for host_dict in self.module.params[param]:
+ host_dict = {k: v for k, v in host_dict.items() if v}
+ self.validate_adv_host_mgmt_enabled_check(host_dict)
+ self.validate_host_access_data(host_dict)
+
+ def validate_module_attributes(self):
+ """
+ Validate module attributes
+ :return None
+ """
+ param_list = ['nfs_export_name', 'nfs_export_id', 'filesystem_name',
+ 'filesystem_id', 'nas_server_id',
+ 'snapshot_name', 'snapshot_id', 'path']
+
+ for param in param_list:
+ if self.module.params[param] and \
+ len(self.module.params[param].strip()) == 0:
+ msg = "Please provide valid value for: %s" % param
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def validate_input(self):
+ """ Validate input parameters """
+
+ if self.module.params['nfs_export_name'] and \
+ not self.module.params['snapshot_name'] and \
+ not self.module.params['snapshot_id']:
+ if ((self.module.params['filesystem_name']) and
+ (not self.module.params['nas_server_id'] and
+ not self.module.params['nas_server_name'])):
+ msg = "Please provide nas server id or name along with " \
+ "filesystem name and nfs name"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if ((not self.module.params['nas_server_id']) and
+ (not self.module.params['nas_server_name']) and
+ (not self.module.params['filesystem_id'])):
+ msg = "Please provide either nas server id/name or " \
+ "filesystem id"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ self.validate_module_attributes()
+ self.validate_host_access_input_params()
+
+ def get_nfs_id_or_name(self):
+ """ Provide nfs_export_id or nfs_export_name user given value
+
+ :return: value provided by user in nfs_export_id/nfs_export_name
+ :rtype: str
+ """
+ if self.module.params['nfs_export_id']:
+ return self.module.params['nfs_export_id']
+ return self.module.params['nfs_export_name']
+
+    def get_nas_from_given_input(self):
+        """ Get nas server object
+
+        Returns None when neither nas_server_id nor nas_server_name was
+        given; fails the module on any lookup error.
+
+        :return: nas server object
+        :rtype: UnityNasServer
+        """
+        LOG.info("Getting nas server details")
+        if not self.module.params['nas_server_id'] and not \
+                self.module.params['nas_server_name']:
+            return None
+        # Identifier used only for error messages; ID wins over name.
+        id_or_name = self.module.params['nas_server_id'] if \
+            self.module.params['nas_server_id'] else self.module.params[
+            'nas_server_name']
+        try:
+            nas = self.unity.get_nas_server(
+                _id=self.module.params['nas_server_id'],
+                name=self.module.params['nas_server_name'])
+        except utils.UnityResourceNotFoundError as e:
+            # In case of incorrect name
+            msg = "Given nas server not found error: %s" % str(e)
+            LOG.error(msg)
+            self.module.fail_json(msg=msg)
+        except utils.HTTPClientError as e:
+            # 401 is called out separately to hint at credential problems.
+            if e.http_status == 401:
+                msg = "Failed to get nas server: %s due to incorrect " \
+                      "username/password error: %s" % (id_or_name, str(e))
+            else:
+                msg = "Failed to get nas server: %s error: %s" % (
+                    id_or_name, str(e))
+            LOG.error(msg)
+            self.module.fail_json(msg=msg)
+        except Exception as e:
+            msg = "Failed to get nas server: %s error: %s" % (
+                id_or_name, str(e))
+            LOG.error(msg)
+            self.module.fail_json(msg=msg)
+
+        if nas and not nas.existed:
+            # In case of incorrect id, sdk return nas object whose attribute
+            # existed=false, instead of raising UnityResourceNotFoundError
+            msg = "Please check nas details it does not exists"
+            LOG.error(msg)
+            self.module.fail_json(msg=msg)
+
+        LOG.info("Got nas server details")
+        return nas
+
+ def get_nfs_share(self, id=None, name=None):
+ """ Get the nfs export
+
+ :return: nfs_export object if nfs exists else None
+ :rtype: UnityNfsShare or None
+ """
+ try:
+ if not id and not name:
+ msg = "Please give nfs id/name"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ id_or_name = id if id else name
+ LOG.info("Getting nfs export: %s", id_or_name)
+ if id:
+ # Get nfs details from nfs ID
+ if self.is_given_nfs_for_fs:
+ nfs = self.unity.get_nfs_share(
+ _id=id, filesystem=self.fs_obj)
+ elif self.is_given_nfs_for_fs is False:
+ # nfs from snap
+ nfs = self.unity.get_nfs_share(_id=id, snap=self.snap_obj)
+ else:
+ nfs = self.unity.get_nfs_share(_id=id)
+ else:
+ # Get nfs details from nfs name
+ if self.is_given_nfs_for_fs:
+ nfs = self.unity.get_nfs_share(
+ name=name, filesystem=self.fs_obj)
+ elif self.is_given_nfs_for_fs is False:
+ # nfs from snap
+ nfs = self.unity.get_nfs_share(
+ name=name, snap=self.snap_obj)
+ else:
+ nfs = self.unity.get_nfs_share(name=name)
+
+ if isinstance(nfs, utils.UnityNfsShareList):
+ # This block will be executed, when we are trying to get nfs
+ # details using nfs name & nas server.
+ nfs_list = nfs
+ LOG.info("Multiple nfs export with same name: %s "
+ "found", id_or_name)
+ if self.nas_obj:
+ for n in nfs_list:
+ if n.filesystem.nas_server == self.nas_obj:
+ return n
+ msg = "Multiple nfs share with same name: %s found. " \
+ "Given nas server is not correct. Please check"
+ else:
+ msg = "Multiple nfs share with same name: %s found. " \
+ "Please give nas server"
+ else:
+ # nfs is instance of UnityNfsShare class
+ if nfs and nfs.existed:
+ if self.nas_obj and nfs.filesystem.nas_server != \
+ self.nas_obj:
+ msg = "nfs found but nas details given is incorrect"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ LOG.info("Successfully got nfs share for: %s", id_or_name)
+ return nfs
+ elif nfs and not nfs.existed:
+ # in case of incorrect id, sdk returns nfs object whose
+ # attribute existed=False
+ msg = "Please check incorrect nfs id is given"
+ else:
+ msg = "Failed to get nfs share: %s" % id_or_name
+ except utils.UnityResourceNotFoundError as e:
+ msg = "NFS share: %(id_or_name)s not found " \
+ "error: %(err)s" % {'id_or_name': id_or_name, 'err': str(e)}
+ LOG.info(str(msg))
+ return None
+ except utils.HTTPClientError as e:
+ if e.http_status == 401:
+ msg = "Failed to get nfs share: %s due to incorrect " \
+ "username/password error: %s" % (id_or_name, str(e))
+ else:
+ msg = "Failed to get nfs share: %s error: %s" % (id_or_name,
+ str(e))
+ except utils.StoropsConnectTimeoutError as e:
+ msg = "Failed to get nfs share: %s check unispherehost IP: %s " \
+ "error: %s" % (id_or_name,
+ self.module.params['nfs_export_id'], str(e))
+ except Exception as e:
+ msg = "Failed to get nfs share: %s error: %s" % (id_or_name,
+ str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+    def delete_nfs_share(self, nfs_obj):
+        """ Delete nfs share
+
+        :param nfs_obj: NFS share obj to delete
+        :type nfs_obj: UnityNfsShare
+        :return: None
+        """
+        try:
+            LOG.info("Deleting nfs share: %s", self.get_nfs_id_or_name())
+            nfs_obj.delete()
+            LOG.info("Deleted nfs share")
+        except Exception as e:
+            msg = "Failed to delete nfs share, error: %s" % str(e)
+            LOG.error(msg)
+            self.module.fail_json(msg=msg)
+
+ def get_filesystem(self):
+ """ Get filesystem obj
+
+ :return: filesystem obj
+ :rtype: UnityFileSystem
+ """
+ if self.module.params['filesystem_id']:
+ id_or_name = self.module.params['filesystem_id']
+ elif self.module.params['filesystem_name']:
+ id_or_name = self.module.params['filesystem_name']
+ else:
+ msg = "Please provide filesystem ID/name, to get filesystem"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ try:
+ if self.module.params['filesystem_name']:
+ if not self.nas_obj:
+ err_msg = "NAS Server is required to get the filesystem"
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+ LOG.info("Getting filesystem by name: %s", id_or_name)
+ fs_obj = self.unity.get_filesystem(
+ name=self.module.params['filesystem_name'],
+ nas_server=self.nas_obj)
+ elif self.module.params['filesystem_id']:
+ LOG.info("Getting filesystem by ID: %s", id_or_name)
+ fs_obj = self.unity.get_filesystem(
+ _id=self.module.params['filesystem_id'])
+ except utils.UnityResourceNotFoundError as e:
+ msg = "Filesystem: %s not found error: %s" % (
+ id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except utils.HTTPClientError as e:
+ if e.http_status == 401:
+ msg = "Failed to get filesystem due to incorrect " \
+ "username/password error: %s" % str(e)
+ else:
+ msg = "Failed to get filesystem error: %s" % str(e)
+ LOG.error(msg)
+ except Exception as e:
+ msg = "Failed to get filesystem: %s error: %s" % (
+ id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if fs_obj and fs_obj.existed:
+ LOG.info("Got the filesystem: %s", id_or_name)
+ return fs_obj
+ else:
+ msg = "Filesystem: %s does not exists" % id_or_name
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_snapshot(self):
+ """ Get snapshot obj
+
+ :return: Snapshot obj
+ :rtype: UnitySnap
+ """
+ if self.module.params['snapshot_id']:
+ id_or_name = self.module.params['snapshot_id']
+ elif self.module.params['snapshot_name']:
+ id_or_name = self.module.params['snapshot_name']
+ else:
+ msg = "Please provide snapshot ID/name, to get snapshot"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ LOG.info("Getting snapshot: %s", id_or_name)
+ try:
+ if id_or_name:
+ snap_obj = self.unity.get_snap(
+ _id=self.module.params['snapshot_id'],
+ name=self.module.params['snapshot_name'])
+ else:
+ msg = "Failed to get the snapshot. Please provide snapshot " \
+ "details"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except utils.UnityResourceNotFoundError as e:
+ msg = "Failed to get snapshot: %s error: %s" % (id_or_name,
+ str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except utils.HTTPClientError as e:
+ if e.http_status == 401:
+ msg = "Failed to get snapshot due to incorrect " \
+ "username/password error: %s" % str(e)
+ else:
+ msg = "Failed to get snapshot error: %s" % str(e)
+ LOG.error(msg)
+ except Exception as e:
+ msg = "Failed to get snapshot: %s error: %s" % (id_or_name,
+ str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if snap_obj and snap_obj.existed:
+ LOG.info("Successfully got the snapshot: %s", id_or_name)
+ return snap_obj
+ else:
+ msg = "Snapshot: %s does not exists" % id_or_name
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+    def get_host_obj(self, host_id=None, host_name=None, ip_address=None):
+        """
+        Get host object by exactly one of ID, name or network address
+        (checked in that order); fails the module when no host is found.
+        :param host_id: ID of the host
+        :param host_name: Name of the host
+        :param ip_address: Network address of the host
+        :return: Host object
+        :rtype: object
+        """
+        try:
+            host_obj = None
+            # 'host' records which identifier was used, for error messages.
+            host = None
+            if host_id:
+                host = host_id
+                host_obj = self.unity.get_host(_id=host_id)
+            elif host_name:
+                host = host_name
+                host_obj = self.unity.get_host(name=host_name)
+            elif ip_address:
+                host = ip_address
+                host_obj = self.unity.get_host(address=ip_address)
+
+            # SDK returns existed=False objects for unknown IDs instead of
+            # raising, so check explicitly.
+            if host_obj and host_obj.existed:
+                LOG.info("Successfully got host: %s", host_obj.name)
+                return host_obj
+            else:
+                msg = f'Host : {host} does not exists'
+                LOG.error(msg)
+                self.module.fail_json(msg=msg)
+
+        except Exception as e:
+            msg = f'Failed to get host {host}, error: {e}'
+            LOG.error(msg)
+            self.module.fail_json(msg=msg)
+
+ def get_host_access_string_value(self, host_dict):
+ """
+ Form host access string
+ :host_dict Host access type info
+ :return Host access data in string
+ """
+ if host_dict.get("host_id"):
+ return self.get_host_obj(host_id=(host_dict.get("host_id"))).name + ','
+ elif host_dict.get("host_name"):
+ return host_dict.get(
+ "host_name") + ','
+ elif host_dict.get("ip_address"):
+ return host_dict.get(
+ "ip_address") + ','
+ elif host_dict.get("subnet"):
+ return host_dict.get(
+ "subnet") + ','
+ elif host_dict.get("domain"):
+ return "*." + host_dict.get(
+ "domain") + ','
+ elif host_dict.get("netgroup"):
+ return "@" + host_dict.get(
+ "netgroup") + ','
+
+ def get_host_obj_value(self, host_dict):
+ """
+ Form host access value using host object
+ :host_dict Host access type info
+ :return Host object
+ """
+ if host_dict.get("host_id"):
+ return self.get_host_obj(host_id=host_dict.get("host_id"))
+ elif host_dict.get("host_name"):
+ return self.get_host_obj(host_name=host_dict.get("host_name"))
+ elif host_dict.get("ip_address"):
+ return self.get_host_obj(ip_address=host_dict.get("ip_address"))
+
+    def format_host_dict_for_adv_mgmt(self):
+        """
+        Form host access for advance management: each playbook host entry
+        is resolved to a host object.
+        :return: Formatted Host access type info
+        :rtype: dict
+        """
+        result_host = {}
+        for param in list(self.host_param_mapping.keys()):
+            if self.module.params[param]:
+                result_host[param] = []
+                for host_dict in self.module.params[param]:
+                    result_host[param].append(self.get_host_obj_value(host_dict))
+
+        # SDK attribute names differ for the two root-host params, so the
+        # playbook keys are renamed here.
+        if 'read_only_root_hosts' in result_host:
+            result_host['read_only_root_access_hosts'] = result_host.pop('read_only_root_hosts')
+        if 'read_write_root_hosts' in result_host:
+            result_host['root_access_hosts'] = result_host.pop('read_write_root_hosts')
+        return result_host
+
+    def format_host_dict_for_non_adv_mgmt(self):
+        """
+        Form host access for non advance management option: each entry is
+        rendered as "value," and concatenated into one string per param.
+        :return: Formatted Host access type info
+        :rtype: dict
+        """
+        result_host = {}
+        for param in list(self.host_param_mapping.keys()):
+            if self.module.params[param]:
+                result_host[param] = ''
+                for host_dict in self.module.params[param]:
+                    result_host[param] += self.get_host_access_string_value(host_dict)
+
+        if result_host != {}:
+            # Since we are supporting HOST STRING parameters instead of HOST
+            # parameters, so lets change given input HOST parameter name to
+            # HOST STRING parameter name and strip trailing ','
+            result_host = {self.host_param_mapping[k]: v[:-1] for k, v in result_host.items()}
+        return result_host
+
+ def get_host_dict_from_pb(self):
+ """ Traverse all given hosts params and provides with host dict,
+ which has respective host str param name with its value
+ required by SDK
+
+ :return: dict with key named as respective host str param name & value
+ required by SDK
+ :rtype: dict
+ """
+ LOG.info("Getting host parameters")
+ result_host = {}
+ if self.module.params['host_state']:
+ if not self.module.params['adv_host_mgmt_enabled']:
+ result_host = self.format_host_dict_for_non_adv_mgmt()
+ else:
+ result_host = self.format_host_dict_for_adv_mgmt()
+ return result_host
+
+    def get_adv_param_from_pb(self):
+        """ Provide all the advance parameters named as required by SDK
+
+        :return: all given advanced parameters
+        :rtype: dict
+        """
+        param = {}
+        LOG.info("Getting all given advance parameter")
+        host_dict = self.get_host_dict_from_pb()
+        if host_dict:
+            param.update(host_dict)
+
+        # 'is not None' so that 0 may be passed for the anonymous uid/gid.
+        fields = ('description', 'anonymous_uid', 'anonymous_gid')
+        for field in fields:
+            if self.module.params[field] is not None:
+                param[field] = self.module.params[field]
+
+        if self.module.params['min_security'] and self.module.params[
+            'min_security'] in utils.NFSShareSecurityEnum.__members__:
+            LOG.info("Getting min_security object from NFSShareSecurityEnum")
+            param['min_security'] = utils.NFSShareSecurityEnum[
+                self.module.params['min_security']]
+
+        if self.module.params['default_access']:
+            param['default_access'] = self.get_default_access()
+
+        LOG.info("Successfully got advance parameter: %s", param)
+        return param
+
+ def get_default_access(self):
+ LOG.info("Getting default_access object from "
+ "NFSShareDefaultAccessEnum")
+ default_access = self.default_access.get(
+ self.module.params['default_access'],
+ self.module.params['default_access'])
+ try:
+ return utils.NFSShareDefaultAccessEnum[default_access]
+ except KeyError as e:
+ msg = "default_access: %s not found error: %s" % (
+ default_access, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg)
+
+ def correct_payload_as_per_sdk(self, payload, nfs_details=None):
+ """ Correct payload keys as required by SDK
+
+ :param payload: Payload used for create/modify operation
+ :type payload: dict
+ :param nfs_details: NFS details
+ :type nfs_details: dict
+ :return: Payload required by SDK
+ :rtype: dict
+ """
+ ouput_host_param = self.host_param_mapping.values()
+ if set(payload.keys()) & set(ouput_host_param):
+ if not nfs_details or (nfs_details and nfs_details['export_option'] != 1):
+ payload['export_option'] = 1
+ if 'read_write_root_hosts_string' in payload:
+ # SDK have param named 'root_access_hosts_string' instead of
+ # 'read_write_root_hosts_string'
+ payload['root_access_hosts_string'] = payload.pop(
+ 'read_write_root_hosts_string')
+
+ return payload
+
+    def create_nfs_share_from_filesystem(self):
+        """ Create nfs share from given filesystem
+
+        Requires both nfs_export_name and path; fails the module otherwise.
+
+        :return: nfs_share object
+        :rtype: UnityNfsShare
+        """
+
+        name = self.module.params['nfs_export_name']
+        path = self.module.params['path']
+
+        if not name or not path:
+            msg = "Please provide name and path both for create"
+            LOG.error(msg)
+            self.module.fail_json(msg=msg)
+
+        param = self.get_adv_param_from_pb()
+        if 'default_access' in param:
+            # create nfs from FILESYSTEM take 'share_access' as param in SDK
+            param['share_access'] = param.pop('default_access')
+            LOG.info("Param name: 'share_access' is used instead of "
+                     "'default_access' in SDK so changed")
+
+        param = self.correct_payload_as_per_sdk(param)
+
+        LOG.info("Creating nfs share from filesystem with param: %s", param)
+        try:
+            nfs_obj = utils.UnityNfsShare.create(
+                cli=self.cli, name=name, fs=self.fs_obj, path=path, **param)
+            LOG.info("Successfully created nfs share: %s", nfs_obj)
+            return nfs_obj
+        except utils.UnityNfsShareNameExistedError as e:
+            # Duplicate export name: surface the SDK message as-is.
+            LOG.error(str(e))
+            self.module.fail_json(msg=str(e))
+        except Exception as e:
+            msg = "Failed to create nfs share: %s error: %s" % (name, str(e))
+            LOG.error(msg)
+            self.module.fail_json(msg=msg)
+
+    def create_nfs_share_from_snapshot(self):
+        """ Create nfs share from given snapshot
+
+        Requires both nfs_export_name and path; fails the module otherwise.
+
+        :return: nfs_share object
+        :rtype: UnityNfsShare
+        """
+
+        name = self.module.params['nfs_export_name']
+        path = self.module.params['path']
+
+        if not name or not path:
+            msg = "Please provide name and path both for create"
+            LOG.error(msg)
+            self.module.fail_json(msg=msg)
+
+        param = self.get_adv_param_from_pb()
+
+        param = self.correct_payload_as_per_sdk(param)
+
+        LOG.info("Creating nfs share from snap with param: %s", param)
+        try:
+            nfs_obj = utils.UnityNfsShare.create_from_snap(
+                cli=self.cli, name=name, snap=self.snap_obj, path=path, **param)
+            LOG.info("Successfully created nfs share: %s", nfs_obj)
+            return nfs_obj
+        except utils.UnityNfsShareNameExistedError as e:
+            # Duplicate export name: surface the SDK message as-is.
+            LOG.error(str(e))
+            self.module.fail_json(msg=str(e))
+        except Exception as e:
+            msg = "Failed to create nfs share: %s error: %s" % (name, str(e))
+            LOG.error(msg)
+            self.module.fail_json(msg=msg)
+
+ def create_nfs_share(self):
+ """ Create nfs share from either filesystem/snapshot
+
+ :return: nfs_share object
+ :rtype: UnityNfsShare
+ """
+ if self.is_given_nfs_for_fs:
+ # Share to be created from filesystem
+ return self.create_nfs_share_from_filesystem()
+ elif self.is_given_nfs_for_fs is False:
+ # Share to be created from snapshot
+ return self.create_nfs_share_from_snapshot()
+ else:
+ msg = "Please provide filesystem or filesystem snapshot to create NFS export"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def convert_host_str_to_list(self, host_str):
+ """ Convert host_str which have comma separated hosts to host_list with
+ ip4/ip6 host obj if IP4/IP6 like string found
+
+ :param host_str: hosts str separated by comma
+ :return: hosts list, which may contains IP4/IP6 object if given in
+ host_str
+ :rytpe: list
+ """
+ if not host_str:
+ LOG.debug("Empty host_str given")
+ return []
+
+ host_list = []
+ try:
+ for h in host_str.split(","):
+ version = get_ip_version(h)
+ if version == 4:
+ h = u'{0}'.format(h)
+ h = IPv4Network(h, strict=False)
+ elif version == 6:
+ h = u'{0}'.format(h)
+ h = IPv6Network(h, strict=False)
+ host_list.append(h)
+ except Exception as e:
+ msg = "Error while converting host_str: %s to list error: %s" % (
+ host_str, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ return host_list
+
+    def add_host_dict_for_adv(self, existing_host_dict, new_host_dict):
+        """ Compares & adds up new hosts with the existing ones and provide
+        the final consolidated hosts for advance host management
+
+        :param existing_host_dict: All hosts params details which are
+        associated with existing nfs which to be modified
+        :type existing_host_dict: dict
+        :param new_host_dict: All hosts param details which are to be added
+        :type new_host_dict: dict
+        :return: consolidated hosts params details which contains newly added
+        hosts along with the existing ones
+        :rtype: dict
+        """
+        modify_host_dict = {}
+        for host_access_key in existing_host_dict:
+            LOG.debug("Checking for param: %s", host_access_key)
+            new_host_obj_list = new_host_dict[host_access_key]
+            if new_host_obj_list and not existing_host_dict[host_access_key]:
+                # Existing nfs host is empty so lets directly add
+                # new_host_str as it is
+                LOG.debug("Existing nfs host key: %s is empty, so lets add new host given value as it is", host_access_key)
+                modify_host_dict[host_access_key] = new_host_obj_list
+                continue
+
+            # NOTE: the comprehension's loop variable shadows the outer
+            # 'existing_host_dict' name, but comprehension scope keeps the
+            # outer dict intact after this expression.
+            existing_host_obj_list = [self.get_host_obj(host_id=existing_host_dict['UnityHost']['id'])
+                                      for existing_host_dict in existing_host_dict[host_access_key]['UnityHostList']]
+
+            if not new_host_obj_list:
+                LOG.debug("Nothing to add as no host given")
+                continue
+
+            # Only hosts whose IDs are not already present get added.
+            existing_set = set(host.id for host in existing_host_obj_list)
+            actual_to_add = [new_host for new_host in new_host_obj_list if new_host.id not in existing_set]
+
+            if not actual_to_add:
+                LOG.debug("All host given to be added is already added")
+                continue
+
+            # Lets extends actual_to_add list, which is new with existing
+            actual_to_add.extend(existing_host_obj_list)
+            modify_host_dict[host_access_key] = actual_to_add
+
+        return modify_host_dict
+
+    def add_host_dict_for_non_adv(self, existing_host_dict, new_host_dict):
+        """ Compares & adds up new hosts with the existing ones and provide
+        the final consolidated hosts for non advance host management
+
+        :param existing_host_dict: All hosts params details which are
+        associated with existing nfs which to be modified
+        :type existing_host_dict: dict
+        :param new_host_dict: All hosts param details which are to be added
+        :type new_host_dict: dict
+        :return: consolidated hosts params details which contains newly added
+        hosts along with the existing ones
+        :rtype: dict
+        """
+        modify_host_dict = {}
+        for host_access_key in existing_host_dict:
+            LOG.debug("Checking add host for param: %s", host_access_key)
+            existing_host_str = existing_host_dict[host_access_key]
+            existing_host_list = self.convert_host_str_to_list(
+                existing_host_str)
+
+            new_host_str = new_host_dict[host_access_key]
+            new_host_list = self.convert_host_str_to_list(
+                new_host_str)
+
+            if not new_host_list:
+                LOG.debug("Nothing to add as no host given")
+                continue
+
+            if new_host_list and not existing_host_list:
+                # Existing nfs host is empty so lets directly add
+                # new_host_str as it is
+                LOG.debug("Existing nfs host key: %s is empty, so lets add new host given value as it is", host_access_key)
+                modify_host_dict[host_access_key] = new_host_str
+                continue
+
+            # NOTE(review): set difference does not preserve host order —
+            # presumably acceptable to the SDK; confirm.
+            actual_to_add = list(set(new_host_list) - set(existing_host_list))
+            if not actual_to_add:
+                LOG.debug("All host given to be added is already added")
+                continue
+
+            # Lets extends actual_to_add list, which is new with existing
+            actual_to_add.extend(existing_host_list)
+
+            # Since SDK takes host_str as ',' separated instead of list, so
+            # lets convert str to list
+            # Note: explicit str() needed here to convert IP4/IP6 object
+            modify_host_dict[host_access_key] = ",".join(str(v) for v in actual_to_add)
+        return modify_host_dict
+
+    def remove_host_dict_for_adv(self, existing_host_dict, new_host_dict):
+        """ Compares & remove new hosts from the existing ones and provide
+        the remaining hosts for advance host management
+
+        :param existing_host_dict: All hosts params details which are
+        associated with existing nfs which to be modified
+        :type existing_host_dict: dict
+        :param new_host_dict: All hosts param details which are to be removed
+        :type new_host_dict: dict
+        :return: existing hosts params details from which given new hosts are
+        removed
+        :rtype: dict
+        """
+        modify_host_dict = {}
+        for host_access_key in existing_host_dict:
+            LOG.debug("Checking host for param: %s", host_access_key)
+            if not existing_host_dict[host_access_key]:
+                # existing list is already empty, so nothing to remove
+                LOG.debug("Existing list is already empty, so nothing to remove")
+                continue
+
+            # NOTE: the comprehension's loop variable shadows the outer
+            # 'existing_host_dict' name, but comprehension scope keeps the
+            # outer dict intact after this expression.
+            existing_host_obj_list = [self.get_host_obj(host_id=existing_host_dict['UnityHost']['id'])
+                                      for existing_host_dict in existing_host_dict[host_access_key]['UnityHostList']]
+            new_host_obj_list = new_host_dict[host_access_key]
+
+            if new_host_obj_list == []:
+                LOG.debug("Nothing to remove as no host given")
+                continue
+
+            # Duplicate IDs inside one request are rejected outright.
+            unique_new_host_list = [new_host.id for new_host in new_host_obj_list]
+            if len(new_host_obj_list) > len(set(unique_new_host_list)):
+                msg = f'Duplicate host given: {unique_new_host_list} in host param: {host_access_key}'
+                LOG.error(msg)
+                self.module.fail_json(msg=msg)
+
+            unique_existing_host_list = [host.id for host in existing_host_obj_list]
+            # Skip the key entirely when nothing requested is present
+            # (idempotency: no modify payload emitted).
+            actual_to_remove = list(set(unique_new_host_list) & set(
+                unique_existing_host_list))
+            if not actual_to_remove:
+                continue
+
+            final_host_list = [existing_host for existing_host in existing_host_obj_list if existing_host.id not in unique_new_host_list]
+
+            modify_host_dict[host_access_key] = final_host_list
+
+        return modify_host_dict
+
+    def remove_host_dict_for_non_adv(self, existing_host_dict, new_host_dict):
+        """ Compares & remove new hosts from the existing ones and provide
+        the remaining hosts for non advance host management
+
+        :param existing_host_dict: All hosts params details which are
+        associated with existing nfs which to be modified
+        :type existing_host_dict: dict
+        :param new_host_dict: All hosts param details which are to be removed
+        :type new_host_dict: dict
+        :return: existing hosts params details from which given new hosts are
+        removed
+        :rtype: dict
+        """
+        modify_host_dict = {}
+
+        for host_access_key in existing_host_dict:
+            LOG.debug("Checking remove host for param: %s", host_access_key)
+            existing_host_str = existing_host_dict[host_access_key]
+            existing_host_list = self.convert_host_str_to_list(
+                existing_host_str)
+
+            new_host_str = new_host_dict[host_access_key]
+            new_host_list = self.convert_host_str_to_list(
+                new_host_str)
+
+            if not new_host_list:
+                LOG.debug("Nothing to remove as no host given")
+                continue
+
+            # Duplicates inside one request are rejected outright.
+            if len(new_host_list) > len(set(new_host_list)):
+                msg = "Duplicate host given: %s in host param: %s" % (
+                    new_host_list, host_access_key)
+                LOG.error(msg)
+                self.module.fail_json(msg=msg)
+
+            if new_host_list and not existing_host_list:
+                # existing list is already empty, so nothing to remove
+                LOG.debug("Existing list is already empty, so nothing to remove")
+                continue
+
+            # Skip the key when nothing requested is actually present
+            # (idempotency: no modify payload emitted).
+            actual_to_remove = list(set(new_host_list) & set(
+                existing_host_list))
+            if not actual_to_remove:
+                continue
+
+            final_host_list = list(set(existing_host_list) - set(
+                actual_to_remove))
+
+            # Since SDK takes host_str as ',' separated instead of list, so
+            # lets convert str to list
+            # Note: explicit str() needed here to convert IP4/IP6 object
+            modify_host_dict[host_access_key] = ",".join(str(v) for v in final_host_list)
+
+        return modify_host_dict
+
+ def add_host(self, existing_host_dict, new_host_dict):
+ """ Compares & adds up new hosts with the existing ones and provide
+ the final consolidated hosts
+
+ :param existing_host_dict: All hosts params details which are
+ associated with existing nfs which to be modified
+ :type existing_host_dict: dict
+ :param new_host_dict: All hosts param details which are to be added
+ :type new_host_dict: dict
+ :return: consolidated hosts params details which contains newly added
+ hosts along with the existing ones
+ :rtype: dict
+ """
+ if self.module.params['adv_host_mgmt_enabled']:
+ modify_host_dict = self.add_host_dict_for_adv(existing_host_dict, new_host_dict)
+ else:
+ modify_host_dict = self.add_host_dict_for_non_adv(existing_host_dict, new_host_dict)
+
+ return modify_host_dict
+
+ def remove_host(self, existing_host_dict, new_host_dict):
+ """ Compares & remove new hosts from the existing ones and provide
+ the remaining hosts
+
+ :param existing_host_dict: All hosts params details which are
+ associated with existing nfs which to be modified
+ :type existing_host_dict: dict
+ :param new_host_dict: All hosts param details which are to be removed
+ :type new_host_dict: dict
+ :return: existing hosts params details from which given new hosts are
+ removed
+ :rtype: dict
+ """
+ if self.module.params['adv_host_mgmt_enabled']:
+ modify_host_dict = self.remove_host_dict_for_adv(existing_host_dict, new_host_dict)
+ else:
+ modify_host_dict = self.remove_host_dict_for_non_adv(existing_host_dict, new_host_dict)
+
+ return modify_host_dict
+
+    def modify_nfs_share(self, nfs_obj):
+        """ Modify given nfs share
+
+        Builds a minimal modify payload from the playbook params (only
+        values that differ from the current share), so an unchanged share
+        yields the idempotency path.
+
+        :param nfs_obj: NFS share obj
+        :type nfs_obj: UnityNfsShare
+        :return: tuple(bool, nfs_obj)
+        - bool: indicates whether nfs_obj is modified or not
+        - nfs_obj: same nfs_obj if not modified else modified nfs_obj
+        :rtype: tuple
+        """
+        modify_param = {}
+        LOG.info("Modifying nfs share")
+
+        nfs_details = nfs_obj._get_properties()
+        fields = ('description', 'anonymous_uid', 'anonymous_gid')
+        for field in fields:
+            # 'is not None' so 0/'' can be applied; added only when changed.
+            if self.module.params[field] is not None and \
+                    self.module.params[field] != nfs_details[field]:
+                modify_param[field] = self.module.params[field]
+
+        if self.module.params['min_security'] and self.module.params[
+            'min_security'] != nfs_obj.min_security.name:
+            modify_param['min_security'] = utils.NFSShareSecurityEnum[
+                self.module.params['min_security']]
+
+        if self.module.params['default_access']:
+            default_access = self.get_default_access()
+            if default_access != nfs_obj.default_access:
+                modify_param['default_access'] = default_access
+
+        new_host_dict = self.get_host_dict_from_pb()
+        if new_host_dict:
+            try:
+                # Mixing management styles is rejected: hosts added via one
+                # mechanism cannot be modified via the other.
+                if is_nfs_have_host_with_host_obj(nfs_details) and not self.module.params['adv_host_mgmt_enabled']:
+                    msg = "Modification of nfs host is restricted using adv_host_mgmt_enabled as false since nfs " \
+                          "already have host added using host obj"
+                    LOG.error(msg)
+                    self.module.fail_json(msg=msg)
+                elif is_nfs_have_host_with_host_string(nfs_details) and self.module.params['adv_host_mgmt_enabled']:
+                    msg = "Modification of nfs host is restricted using adv_host_mgmt_enabled as true since nfs " \
+                          "already have host added without host obj"
+                    LOG.error(msg)
+                    self.module.fail_json(msg=msg)
+                LOG.info("Extracting same given param from nfs")
+                existing_host_dict = {k: nfs_details[k] for k in new_host_dict}
+            except KeyError as e:
+                msg = "Failed to extract key-value from current nfs: %s" % \
+                      str(e)
+                LOG.error(msg)
+                self.module.fail_json(msg=msg)
+
+            if self.module.params['host_state'] == HOST_STATE_LIST[0]:
+                # present-in-export
+                LOG.info("Getting host to be added")
+                modify_host_dict = self.add_host(existing_host_dict, new_host_dict)
+            else:
+                # absent-in-export
+                LOG.info("Getting host to be removed")
+                modify_host_dict = self.remove_host(existing_host_dict, new_host_dict)
+
+            if modify_host_dict:
+                modify_param.update(modify_host_dict)
+
+        if not modify_param:
+            LOG.info("Existing nfs attribute value is same as given input, "
+                     "so returning same nfs object - idempotency case")
+            return False, nfs_obj
+
+        modify_param = self.correct_payload_as_per_sdk(
+            modify_param, nfs_details)
+
+        try:
+            resp = nfs_obj.modify(**modify_param)
+            resp.raise_if_err()
+        except Exception as e:
+            msg = "Failed to modify nfs error: %s" % str(e)
+            LOG.error(msg)
+            self.module.fail_json(msg=msg)
+
+        # Re-fetch so the caller gets the refreshed attributes.
+        return True, self.get_nfs_share(id=nfs_obj.id)
+
+    def perform_module_operation(self):
+        """ Perform different actions on nfs based on user parameter
+        chosen in playbook
+
+        Orchestrates: validate -> resolve NAS/filesystem/snapshot ->
+        fetch share -> delete/create/modify -> emit result.
+        """
+
+        changed = False
+        nfs_share_details = {}
+
+        self.validate_input()
+
+        self.nas_obj = None
+        if self.module.params['nas_server_id'] or self.module.params[
+            'nas_server_name']:
+            self.nas_obj = self.get_nas_from_given_input()
+
+        # is_given_nfs_for_fs is tri-state: True = filesystem source,
+        # False = snapshot source, None = neither given.
+        self.fs_obj = None
+        self.snap_obj = None
+        if self.is_given_nfs_for_fs:
+            self.fs_obj = self.get_filesystem()
+        elif self.is_given_nfs_for_fs is False:
+            self.snap_obj = self.get_snapshot()
+
+        # Get nfs Share
+        nfs_obj = self.get_nfs_share(
+            id=self.module.params['nfs_export_id'],
+            name=self.module.params['nfs_export_name']
+        )
+
+        # Delete nfs Share
+        if self.module.params['state'] == STATE_LIST[1]:
+            if nfs_obj:
+                # delete_nfs_share() does not return any value
+                # In case of successful delete, lets nfs_obj set None
+                # to avoid fetching and displaying attribute
+                nfs_obj = self.delete_nfs_share(nfs_obj)
+                changed = True
+        elif not nfs_obj:
+            # create
+            nfs_obj = self.create_nfs_share()
+            changed = True
+        else:
+            # modify
+            changed, nfs_obj = self.modify_nfs_share(nfs_obj)
+
+        # Get display attributes
+        if self.module.params['state'] and nfs_obj:
+            nfs_share_details = get_nfs_share_display_attrs(nfs_obj)
+
+        result = {"changed": changed,
+                  "nfs_share_details": nfs_share_details}
+        self.module.exit_json(**result)
+
+
+def get_nfs_share_display_attrs(nfs_obj):
+    """ Provide nfs share attributes for display
+
+    Enriches the raw SDK property dict with filesystem, NAS-server and
+    (when applicable) snapshot names/ids, which the SDK does not inline.
+
+    :param nfs_obj: NFS share obj
+    :type nfs_obj: UnityNfsShare
+    :return: nfs_share_details
+    :rtype: dict
+    """
+    LOG.info("Getting nfs share details from nfs share object")
+    nfs_share_details = nfs_obj._get_properties()
+
+    # Adding filesystem_name to nfs_share_details
+    LOG.info("Updating filesystem details")
+    nfs_share_details['filesystem']['UnityFileSystem']['name'] = \
+        nfs_obj.filesystem.name
+    if 'id' not in nfs_share_details['filesystem']['UnityFileSystem']:
+        nfs_share_details['filesystem']['UnityFileSystem']['id'] = \
+            nfs_obj.filesystem.id
+
+    # Adding nas server details
+    LOG.info("Updating nas server details")
+    nas_details = nfs_obj.filesystem._get_properties()['nas_server']
+    nas_details['UnityNasServer']['name'] = \
+        nfs_obj.filesystem.nas_server.name
+    nfs_share_details['nas_server'] = nas_details
+
+    # Adding snap.id & snap.name if nfs_obj is for snap
+    if is_nfs_obj_for_snap(nfs_obj):
+        LOG.info("Updating snap details")
+        nfs_share_details['snap']['UnitySnap']['id'] = nfs_obj.snap.id
+        nfs_share_details['snap']['UnitySnap']['name'] = nfs_obj.snap.name
+
+    LOG.info("Successfully updated nfs share details")
+    return nfs_share_details
+
+
+def is_nfs_have_host_with_host_obj(nfs_details):
+ """ Check whether nfs host is already added using host obj
+
+ :param nfs_details: nfs details
+ :return: True if nfs have host already added with host obj else False
+ :rtype: bool
+ """
+ host_obj_params = ('no_access_hosts', 'read_only_hosts',
+ 'read_only_root_access_hosts', 'read_write_hosts',
+ 'root_access_hosts')
+ for host_obj_param in host_obj_params:
+ if nfs_details.get(host_obj_param):
+ return True
+ return False
+
+
+def is_nfs_have_host_with_host_string(nfs_details):
+ """ Check whether nfs host is already added using host by string method
+
+ :param nfs_details: nfs details
+ :return: True if nfs have host already added with host string method else False
+ :rtype: bool
+ """
+ host_obj_params = (
+ 'no_access_hosts_string',
+ 'read_only_hosts_string',
+ 'read_only_root_hosts_string',
+ 'read_write_hosts_string',
+ 'read_write_root_hosts_string'
+ )
+ for host_obj_param in host_obj_params:
+ if nfs_details.get(host_obj_param):
+ return True
+ return False
+
+
+def get_ip_version(val):
+ try:
+ val = u'{0}'.format(val)
+ ip = ip_network(val, strict=False)
+ return ip.version
+ except ValueError:
+ return 0
+
+
+def is_nfs_obj_for_fs(nfs_obj):
+ """ Check whether the nfs_obj if for filesystem
+
+ :param nfs_obj: NFS share object
+ :return: True if nfs_obj is of filesystem type
+ :rtype: bool
+ """
+ if nfs_obj.type == utils.NFSTypeEnum.NFS_SHARE:
+ return True
+ return False
+
+
+def is_nfs_obj_for_snap(nfs_obj):
+ """ Check whether the nfs_obj if for snapshot
+
+ :param nfs_obj: NFS share object
+ :return: True if nfs_obj is of snapshot type
+ :rtype: bool
+ """
+ if nfs_obj.type == utils.NFSTypeEnum.NFS_SNAPSHOT:
+ return True
+ return False
+
+
+def get_nfs_parameters():
+    """ Provides parameters required for the NFS share module on Unity
+
+    HOST_DICT is the shared sub-spec used by all five host-access options.
+    """
+
+    return dict(
+        nfs_export_name=dict(required=False, type='str'),
+        nfs_export_id=dict(required=False, type='str'),
+        filesystem_id=dict(required=False, type='str'),
+        filesystem_name=dict(required=False, type='str'),
+        snapshot_id=dict(required=False, type='str'),
+        snapshot_name=dict(required=False, type='str'),
+        nas_server_id=dict(required=False, type='str'),
+        nas_server_name=dict(required=False, type='str'),
+        # NOTE(review): path is not a secret; no_log=True presumably
+        # silences Ansible's no_log sanity heuristic — confirm.
+        path=dict(required=False, type='str', no_log=True),
+        description=dict(required=False, type='str'),
+        default_access=dict(required=False, type='str',
+                            choices=DEFAULT_ACCESS_LIST),
+        min_security=dict(required=False, type='str',
+                          choices=MIN_SECURITY_LIST),
+        adv_host_mgmt_enabled=dict(required=False, type='bool', default=None),
+        no_access_hosts=HOST_DICT,
+        read_only_hosts=HOST_DICT,
+        read_only_root_hosts=HOST_DICT,
+        read_write_hosts=HOST_DICT,
+        read_write_root_hosts=HOST_DICT,
+        host_state=dict(required=False, type='str', choices=HOST_STATE_LIST),
+        anonymous_uid=dict(required=False, type='int'),
+        anonymous_gid=dict(required=False, type='int'),
+        state=dict(required=True, type='str', choices=STATE_LIST)
+    )
+
+
+def main():
+ """ Create UnityNFS object and perform action on it
+ based on user input from playbook"""
+ obj = NFS()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py b/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py
new file mode 100644
index 000000000..e492e3af0
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py
@@ -0,0 +1,494 @@
+#!/usr/bin/python
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing NFS server on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: nfsserver
+version_added: '1.4.0'
+short_description: Manage NFS server on Unity storage system
+description:
+- Managing the NFS server on the Unity storage system includes creating an NFS server, getting NFS server details
+  and deleting an NFS server.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+
+options:
+ nas_server_name:
+ description:
+ - Name of the NAS server on which NFS server will be hosted.
+ type: str
+ nas_server_id:
+ description:
+ - ID of the NAS server on which NFS server will be hosted.
+ type: str
+ nfs_server_id:
+ description:
+ - ID of the NFS server.
+ type: str
+ host_name:
+ description:
+ - Host name of the NFS server.
+ type: str
+ nfs_v4_enabled:
+ description:
+ - Indicates whether the NFSv4 is enabled on the NAS server.
+ type: bool
+ is_secure_enabled:
+ description:
+ - Indicates whether the secure NFS is enabled.
+ type: bool
+ kerberos_domain_controller_type:
+ description:
+ - Type of Kerberos Domain Controller used for secure NFS service.
+ choices: [CUSTOM, UNIX, WINDOWS]
+ type: str
+ kerberos_domain_controller_username:
+ description:
+ - Kerberos Domain Controller administrator username.
+ type: str
+ kerberos_domain_controller_password:
+ description:
+ - Kerberos Domain Controller administrator password.
+ type: str
+ is_extended_credentials_enabled:
+ description:
+    - Indicates whether to support more than 16 Unix groups in a Unix credential.
+ type: bool
+ remove_spn_from_kerberos:
+ description:
+ - Indicates whether to remove the SPN from Kerberos Domain Controller.
+ default: true
+ type: bool
+ state:
+ description:
+ - Define whether the NFS server should exist or not.
+ choices: [absent, present]
+ required: true
+ type: str
+notes:
+- The I(check_mode) is supported.
+- Modify operation for NFS Server is not supported.
+- When I(kerberos_domain_controller_type) is C(UNIX), I(kdc_type) in I(nfs_server_details) output is displayed as C(null).
+'''
+
+EXAMPLES = r'''
+
+ - name: Create NFS server with kdctype as Windows
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ host_name: "dummy_nas23"
+ is_secure_enabled: True
+ kerberos_domain_controller_type: "WINDOWS"
+ kerberos_domain_controller_username: "administrator"
+ kerberos_domain_controller_password: "Password123!"
+ is_extended_credentials_enabled: True
+ nfs_v4_enabled: True
+ state: "present"
+
+ - name: Create NFS server with kdctype as Unix
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ host_name: "dummy_nas23"
+ is_secure_enabled: True
+ kerberos_domain_controller_type: "UNIX"
+ is_extended_credentials_enabled: True
+ nfs_v4_enabled: True
+ state: "present"
+
+ - name: Get NFS server details
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ state: "present"
+
+ - name: Delete NFS server
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ kerberos_domain_controller_username: "administrator"
+ kerberos_domain_controller_password: "Password123!"
+ unjoin_server_account: False
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+nfs_server_details:
+ description: Details of the NFS server.
+ returned: When NFS server exists
+ type: dict
+ contains:
+ credentials_cache_ttl:
+ description: Credential cache refresh timeout. Resolution is in minutes. Default value is 15 minutes.
+ type: str
+ existed:
+ description: Indicates if NFS Server exists.
+ type: bool
+ host_name:
+ description: Host name of the NFS server.
+ type: str
+ id:
+ description: Unique identifier of the NFS Server instance.
+ type: str
+ is_extended_credentials_enabled:
+ description: Indicates whether the NFS server supports more than 16 Unix groups in a Unix credential.
+ type: bool
+ is_secure_enabled:
+ description: Indicates whether secure NFS is enabled on the NFS server.
+ type: bool
+ kdc_type:
+ description: Type of Kerberos Domain Controller used for secure NFS service.
+ type: str
+ nfs_v4_enabled:
+ description: Indicates whether NFSv4 is enabled on the NAS server.
+ type: bool
+ servicee_principal_name:
+ description: The Service Principal Name (SPN) for the NFS Server.
+ type: str
+ sample: {
+ "credentials_cache_ttl": "0:15:00",
+ "existed": true,
+ "file_interfaces": {
+ "UnityFileInterfaceList": [
+ {
+ "UnityFileInterface": {
+ "hash": 8778980109421,
+ "id": "if_37"
+ }
+ }
+ ]
+ },
+ "hash": 8778980109388,
+ "host_name": "dummy_nas23.pie.lab.emc.com",
+ "id": "nfs_51",
+ "is_extended_credentials_enabled": true,
+ "is_secure_enabled": true,
+ "kdc_type": "KdcTypeEnum.WINDOWS",
+ "nas_server": {
+ "UnityNasServer": {
+ "hash": 8778980109412
+ }
+ },
+ "nfs_v4_enabled": true,
+ "servicee_principal_name": null
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('nfsserver')
+
+application_type = "Ansible/1.6.0"
+
+
class NFSServer(object):
    """Class with NFS server operations"""

    def __init__(self):
        """Define all parameters required by this module"""
        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_nfs_server_parameters())

        mutually_exclusive = [['nas_server_name', 'nas_server_id']]
        required_one_of = [['nfs_server_id', 'nas_server_name', 'nas_server_id']]

        # initialize the Ansible module
        self.module = AnsibleModule(
            argument_spec=self.module_params,
            supports_check_mode=True,
            mutually_exclusive=mutually_exclusive,
            required_one_of=required_one_of
        )
        utils.ensure_required_libs(self.module)

        self.unity_conn = utils.get_unity_unisphere_connection(
            self.module.params, application_type)
        LOG.info('Check Mode Flag %s', self.module.check_mode)

    def get_nfs_server_details(self, nfs_server_id=None, nas_server_id=None):
        """Get NFS server details.
        :param: nfs_server_id: The ID of the NFS server
        :param: nas_server_id: The ID of the NAS server
        :return: Dict containing NFS server details if it exists, else None
        """
        LOG.info("Getting NFS server details")
        try:
            if nfs_server_id:
                nfs_server_details = self.unity_conn.get_nfs_server(_id=nfs_server_id)
                return nfs_server_details._get_properties()
            elif nas_server_id:
                # Lookup by NAS server returns a collection; flatten the
                # single-element property lists before returning.
                nfs_server_details = self.unity_conn.get_nfs_server(nas_server=nas_server_id)
                if len(nfs_server_details) > 0:
                    return process_dict(nfs_server_details._get_properties())
            return None
        except utils.HttpError as e:
            if e.http_status == 401:
                msg = 'Incorrect username or password provided.'
                LOG.error(msg)
                self.module.fail_json(msg=msg)
            else:
                err_msg = "Failed to get details of NFS Server" \
                          " with error {0}".format(str(e))
                LOG.error(err_msg)
                self.module.fail_json(msg=err_msg)

        except utils.UnityResourceNotFoundError as e:
            # "Not found" is treated as absence rather than a failure so
            # idempotent create/delete flows can proceed.
            err_msg = "Failed to get details of NFS Server" \
                      " with error {0}".format(str(e))
            LOG.error(err_msg)
            return None

    def get_nfs_server_instance(self, nfs_server_id):
        """Get NFS server instance.
        :param: nfs_server_id: The ID of the NFS server
        :return: NFS server instance if it exists
        """
        try:
            nfs_server_obj = self.unity_conn.get_nfs_server(_id=nfs_server_id)
            return nfs_server_obj
        except Exception as e:
            error_msg = "Failed to get the NFS server %s instance" \
                        " with error %s" % (nfs_server_id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def delete_nfs_server(self, nfs_server_id, skip_unjoin=None, domain_username=None, domain_password=None):
        """Delete NFS server.
        :param: nfs_server_id: The ID of the NFS server
        :param: skip_unjoin: Whether to skip unjoining the NFS server account
                from the Kerberos Domain Controller before deletion
        :param: domain_username: The domain username
        :param: domain_password: The domain password
        :return: True if NFS server is deleted (in check mode the deletion
                 is only simulated and True is still returned)
        """
        LOG.info("Deleting NFS server")
        try:
            if not self.module.check_mode:
                nfs_obj = self.get_nfs_server_instance(nfs_server_id=nfs_server_id)
                nfs_obj.delete(skip_kdc_unjoin=skip_unjoin, username=domain_username, password=domain_password)
            return True
        except Exception as e:
            msg = "Failed to delete NFS server: %s with error: %s" % (nfs_server_id, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_nas_server_id(self, nas_server_name):
        """Get NAS server ID.
        :param: nas_server_name: The name of NAS server
        :return: Return NAS server ID if it exists
        """
        LOG.info("Getting NAS server ID")
        try:
            obj_nas = self.unity_conn.get_nas_server(name=nas_server_name)
            return obj_nas.get_id()
        except Exception as e:
            msg = "Failed to get details of NAS server: %s with error: %s" % (nas_server_name, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def is_modification_required(self, is_extended_credentials_enabled, nfs_server_details):
        """Check if modification is required in existing NFS server.
        :param: is_extended_credentials_enabled: Whether the NFS server should support
                more than 16 Unix groups in a Unix credential
        :param: nfs_server_details: NFS server details
        :return: True if modification is required, else False
        """
        LOG.info("Checking if any modification is required")
        # Only the extended-credentials flag is compared here; all other NFS
        # server attributes cannot be modified after creation.
        if is_extended_credentials_enabled is not None and \
                is_extended_credentials_enabled != nfs_server_details['is_extended_credentials_enabled']:
            return True
        return False

    def create_nfs_server(self, nas_server_id, host_name=None, nfs_v4_enabled=None, is_secure_enabled=None,
                          kerberos_domain_controller_type=None, kerberos_domain_controller_username=None,
                          kerberos_domain_controller_password=None, is_extended_credentials_enabled=None):
        """Create NFS server.
        :param: nas_server_id: The ID of NAS server.
        :param: host_name: Name of NFS Server.
        :param: nfs_v4_enabled: Indicates whether the NFSv4 is enabled on the NAS server.
        :param: is_secure_enabled: Indicates whether the secure NFS is enabled.
        :param: kerberos_domain_controller_type: Type of Kerberos Domain Controller used for secure NFS service.
        :param: kerberos_domain_controller_username: Kerberos Domain Controller administrator username.
        :param: kerberos_domain_controller_password: Kerberos Domain Controller administrator password.
        :param: is_extended_credentials_enabled: Indicates whether to support more than 16 unix groups in a Unix credential.
        :return: True (in check mode the creation is only simulated)
        """
        LOG.info("Creating NFS server")
        try:
            if not self.module.check_mode:
                kdc_enum_type = get_enum_kdctype(kerberos_domain_controller_type)
                # For a UNIX KDC these two attributes are suppressed before
                # the create call is issued.
                if kerberos_domain_controller_type == "UNIX":
                    is_extended_credentials_enabled = None
                    is_secure_enabled = None
                utils.UnityNfsServer.create(cli=self.unity_conn._cli, nas_server=nas_server_id, host_name=host_name,
                                            nfs_v4_enabled=nfs_v4_enabled,
                                            is_secure_enabled=is_secure_enabled, kdc_type=kdc_enum_type,
                                            kdc_username=kerberos_domain_controller_username,
                                            kdc_password=kerberos_domain_controller_password,
                                            is_extended_credentials_enabled=is_extended_credentials_enabled)
            return True
        except Exception as e:
            # Fixed message: previously read "with on NAS Server".
            msg = "Failed to create NFS server on NAS Server %s with error: %s" % (nas_server_id, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def validate_input_params(self):
        """Fail the task if any supplied string parameter is empty or whitespace-only."""
        param_list = ["nfs_server_id", "nas_server_id", "nas_server_name", "host_name",
                      "kerberos_domain_controller_username",
                      "kerberos_domain_controller_password"]

        for param in param_list:
            value = self.module.params[param]
            if value is not None and len(value.strip()) == 0:
                # Previously the message was %-formatted and then pointlessly
                # passed through str.format(); build it once instead.
                self.module.fail_json(msg="Please provide valid value for: %s" % param)

    def perform_module_operation(self):
        """
        Perform different actions on NFS server module based on parameters
        passed in the playbook
        """
        nfs_server_id = self.module.params['nfs_server_id']
        nas_server_id = self.module.params['nas_server_id']
        nas_server_name = self.module.params['nas_server_name']
        host_name = self.module.params['host_name']
        nfs_v4_enabled = self.module.params['nfs_v4_enabled']
        is_secure_enabled = self.module.params['is_secure_enabled']
        kerberos_domain_controller_type = self.module.params['kerberos_domain_controller_type']
        kerberos_domain_controller_username = self.module.params['kerberos_domain_controller_username']
        kerberos_domain_controller_password = self.module.params['kerberos_domain_controller_password']
        is_extended_credentials_enabled = self.module.params['is_extended_credentials_enabled']
        remove_spn_from_kerberos = self.module.params['remove_spn_from_kerberos']
        state = self.module.params['state']

        # result is a dictionary that contains changed status and NFS server details
        result = dict(
            changed=False,
            nfs_server_details={}
        )

        modify_flag = False

        self.validate_input_params()

        if nas_server_name:
            nas_server_id = self.get_nas_server_id(nas_server_name)

        nfs_server_details = self.get_nfs_server_details(nfs_server_id=nfs_server_id,
                                                         nas_server_id=nas_server_id)

        # Modification of an existing NFS server is not supported; fail
        # early if the requested settings differ from the current ones.
        if nfs_server_details and state == 'present':
            modify_flag = self.is_modification_required(is_extended_credentials_enabled, nfs_server_details)
            if modify_flag:
                self.module.fail_json(msg="Modification of NFS Server parameters is not supported through Ansible module")

        if not nfs_server_details and state == 'present':
            if not nas_server_id:
                self.module.fail_json(msg="Please provide nas server id/name to create NFS server.")

            result['changed'] = self.create_nfs_server(nas_server_id, host_name, nfs_v4_enabled,
                                                       is_secure_enabled, kerberos_domain_controller_type,
                                                       kerberos_domain_controller_username,
                                                       kerberos_domain_controller_password,
                                                       is_extended_credentials_enabled)

        if state == 'absent' and nfs_server_details:
            # remove_spn_from_kerberos=True means the unjoin must NOT be skipped.
            skip_unjoin = not remove_spn_from_kerberos
            result['changed'] = self.delete_nfs_server(nfs_server_details["id"], skip_unjoin,
                                                       kerberos_domain_controller_username,
                                                       kerberos_domain_controller_password)

        if state == 'present':
            result['nfs_server_details'] = self.get_nfs_server_details(nfs_server_id=nfs_server_id,
                                                                       nas_server_id=nas_server_id)
        self.module.exit_json(**result)
+
+
def get_nfs_server_parameters():
    """Return the argument spec required by the ansible NFS server
    module on Unity."""
    return {
        'nfs_server_id': {'type': 'str'},
        'host_name': {'type': 'str'},
        'nfs_v4_enabled': {'type': 'bool'},
        'is_secure_enabled': {'type': 'bool'},
        'kerberos_domain_controller_type': {
            'type': 'str', 'choices': ['UNIX', 'WINDOWS', 'CUSTOM']},
        'kerberos_domain_controller_username': {'type': 'str'},
        'kerberos_domain_controller_password': {'type': 'str',
                                                'no_log': True},
        'nas_server_name': {'type': 'str'},
        'nas_server_id': {'type': 'str'},
        'is_extended_credentials_enabled': {'type': 'bool'},
        'remove_spn_from_kerberos': {'default': True, 'type': 'bool'},
        'state': {'required': True, 'type': 'str',
                  'choices': ['present', 'absent']},
    }
+
+
def get_enum_kdctype(kerberos_domain_controller_type):
    """Getting correct enum value for kerberos_domain_controller_type.
    :param: kerberos_domain_controller_type: Type of Kerberos Domain Controller
            used for secure NFS service, or None when secure NFS is not requested
    :return: Enum value for kerberos_domain_controller_type, or None when
             no type was given
    """
    # Guard against None: indexing the enum with None raises KeyError,
    # which previously made the surrounding create call fail.
    if kerberos_domain_controller_type is None:
        return None
    return utils.KdcTypeEnum[kerberos_domain_controller_type]
+
+
def process_dict(nfs_server_details):
    """Process NFS server details.

    Flattens the single-element lists returned by the array lookup so each
    known attribute holds a scalar, and stringifies the cache TTL.

    :param: nfs_server_details: Dict containing NFS server details
    :return: Processed dict containing NFS server details
    """
    param_list = ['credentials_cache_ttl', 'file_interfaces', 'host_name', 'id', 'kdc_type', 'nas_server', 'is_secure_enabled',
                  'is_extended_credentials_enabled', 'nfs_v4_enabled', 'servicee_principal_name']

    for param in param_list:
        # Fix: previously a key missing from the input still reached the
        # flattening branch and raised KeyError; skip absent keys instead.
        if param not in nfs_server_details:
            continue
        if param == 'credentials_cache_ttl':
            # TTL is a timedelta-like value; expose it as a string.
            nfs_server_details[param] = str(nfs_server_details[param][0])
        else:
            nfs_server_details[param] = nfs_server_details[param][0]
    return nfs_server_details
+
+
def main():
    """Create Unity NFS server object and perform action on it
    based on user input from playbook"""
    nfs_server_module = NFSServer()
    nfs_server_module.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/smbshare.py b/ansible_collections/dellemc/unity/plugins/modules/smbshare.py
new file mode 100644
index 000000000..58bc8c709
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/smbshare.py
@@ -0,0 +1,877 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: smbshare
+version_added: '1.1.0'
+short_description: Manage SMB shares on Unity storage system
+extends_documentation_fragment:
+- dellemc.unity.unity
+author:
+- P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com>
+description:
+- Managing SMB Shares on Unity storage system includes create, get,
+ modify, and delete the smb shares.
+options:
+ share_name:
+ description:
+ - Name of the SMB share.
+ - Required during creation of the SMB share.
+ - For all other operations either I(share_name) or I(share_id) is required.
+ type: str
+ share_id:
+ description:
+ - ID of the SMB share.
+ - Should not be specified during creation. Id is auto generated.
+ - For all other operations either I(share_name) or I(share_id) is required.
+ - If I(share_id) is used then no need to pass nas_server/filesystem/snapshot/path.
+ type: str
+ path:
+ description:
+ - Local path to the file system/Snapshot or any existing sub-folder of
+ the file system/Snapshot that is shared over the network.
+ - Path is relative to the root of the filesystem.
+ - Required for creation of the SMB share.
+ type: str
+ filesystem_id:
+ description:
+ - The ID of the File System.
+ - Either I(filesystem_name) or I(filesystem_id) is required for creation of the SMB share for filesystem.
+ - If I(filesystem_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to
+ uniquely identify the filesystem.
+ - Options I(filesystem_name) and I(filesystem_id) are mutually exclusive parameters.
+ type: str
+ snapshot_id:
+ description:
+ - The ID of the Filesystem Snapshot.
+ - Either I(snapshot_name) or I(snapshot_id) is required for creation of the SMB share for a snapshot.
+ - If I(snapshot_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to
+ uniquely identify the snapshot.
+ - Options I(snapshot_name) and I(snapshot_id) are mutually exclusive parameters.
+ type: str
+ nas_server_id:
+ description:
+ - The ID of the NAS Server.
+ - It is not required if I(share_id) is used.
+ type: str
+ filesystem_name:
+ description:
+ - The Name of the File System.
+ - Either I(filesystem_name) or I(filesystem_id) is required for creation of the SMB share for filesystem.
+ - If I(filesystem_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to
+ uniquely identify the filesystem.
+    - Options I(filesystem_name) and I(filesystem_id) are mutually exclusive parameters.
+ type: str
+ snapshot_name:
+ description:
+ - The Name of the Filesystem Snapshot.
+ - Either I(snapshot_name) or I(snapshot_id) is required for creation of the SMB share for a snapshot.
+ - If I(snapshot_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to
+ uniquely identify the snapshot.
+ - Options I(snapshot_name) and I(snapshot_id) are mutually exclusive parameters.
+ type: str
+ nas_server_name:
+ description:
+ - The Name of the NAS Server.
+ - It is not required if I(share_id) is used.
+ - Options I(nas_server_name) and I(nas_server_id) are mutually exclusive parameters.
+ type: str
+ description:
+ description:
+ - Description for the SMB share.
+ - Optional parameter when creating a share.
+ - To modify, pass the new value in description field.
+ type: str
+ is_abe_enabled:
+ description:
+ - Indicates whether Access-based Enumeration (ABE) for SMB share is enabled.
+ - During creation, if not mentioned then default is C(false).
+ type: bool
+ is_branch_cache_enabled:
+ description:
+ - Indicates whether Branch Cache optimization for SMB share is enabled.
+ - During creation, if not mentioned then default is C(false).
+ type: bool
+ is_continuous_availability_enabled:
+ description:
+ - Indicates whether continuous availability for SMB 3.0 is enabled.
+ - During creation, if not mentioned then default is C(false).
+ type: bool
+ is_encryption_enabled:
+ description:
+ - Indicates whether encryption for SMB 3.0 is enabled at the shared folder level.
+ - During creation, if not mentioned then default is C(false).
+ type: bool
+ offline_availability:
+ description:
+ - Defines valid states of Offline Availability.
+ - C(MANUAL)- Only specified files will be available offline.
+ - C(DOCUMENTS)- All files that users open will be available offline.
+ - C(PROGRAMS)- Program will preferably run from the offline cache even when
+ connected to the network. All files that users open will be available offline.
+ - C(NONE)- Prevents clients from storing documents and programs in offline cache.
+ type: str
+ choices: ["MANUAL","DOCUMENTS","PROGRAMS","NONE"]
+ umask:
+ description:
+ - The default UNIX umask for new files created on the SMB Share.
+ type: str
+ state:
+ description:
+ - Define whether the SMB share should exist or not.
+ - Value C(present) indicates that the share should exist on the system.
+ - Value C(absent) indicates that the share should not exist on the system.
+ type: str
+ required: true
+ choices: ['absent', 'present']
+notes:
+- When ID/Name of the filesystem/snapshot is passed then I(nas_server) is not required.
+ If passed, then filesystem/snapshot should exist for the mentioned I(nas_server),
+ else the task will fail.
+- The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+- name: Create SMB share for a filesystem
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_smb_share"
+ filesystem_name: "sample_fs"
+ nas_server_id: "NAS_11"
+ path: "/sample_fs"
+ description: "Sample SMB share created"
+ is_abe_enabled: True
+ is_branch_cache_enabled: True
+ offline_availability: "DOCUMENTS"
+ is_continuous_availability_enabled: True
+ is_encryption_enabled: True
+ umask: "777"
+ state: "present"
+- name: Modify Attributes of SMB share for a filesystem
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_smb_share"
+ nas_server_name: "sample_nas_server"
+ description: "Sample SMB share attributes updated"
+ is_abe_enabled: False
+ is_branch_cache_enabled: False
+ offline_availability: "MANUAL"
+ is_continuous_availability_enabled: "False"
+ is_encryption_enabled: "False"
+ umask: "022"
+ state: "present"
+- name: Create SMB share for a snapshot
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_snap_smb_share"
+ snapshot_name: "sample_snapshot"
+ nas_server_id: "NAS_11"
+ path: "/sample_snapshot"
+ description: "Sample SMB share created for snapshot"
+ is_abe_enabled: True
+ is_branch_cache_enabled: True
+ is_continuous_availability_enabled: True
+ is_encryption_enabled: True
+ umask: "777"
+ state: "present"
+- name: Modify Attributes of SMB share for a snapshot
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_snap_smb_share"
+ snapshot_name: "sample_snapshot"
+ description: "Sample SMB share attributes updated for snapshot"
+ is_abe_enabled: False
+ is_branch_cache_enabled: False
+ offline_availability: "MANUAL"
+ is_continuous_availability_enabled: "False"
+ is_encryption_enabled: "False"
+ umask: "022"
+ state: "present"
+- name: Get details of SMB share
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_id: "{{smb_share_id}}"
+ state: "present"
+- name: Delete SMB share
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_id: "{{smb_share_id}}"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+smb_share_details:
+ description: The SMB share details.
+ type: dict
+ returned: When share exists.
+ contains:
+ id:
+ description: The ID of the SMB share.
+ type: str
+ name:
+ description: Name of the SMB share.
+ type: str
+ sample: "sample_smb_share"
+ filesystem_id:
+ description: The ID of the Filesystem.
+ type: str
+ filesystem_name:
+ description: The Name of the filesystem
+ type: str
+ snapshot_id:
+ description: The ID of the Snapshot.
+ type: str
+ snapshot_name:
+ description: The Name of the Snapshot.
+ type: str
+ nas_server_id:
+ description: The ID of the nas_server.
+ type: str
+ nas_server_name:
+ description: The Name of the nas_server.
+ type: str
+ description:
+ description: Additional information about the share.
+ type: str
+ sample: This share is created for demo purpose only.
+ is_abe_enabled:
+ description: Whether Access Based enumeration is enforced or not.
+ type: bool
+ sample: false
+ is_branch_cache_enabled:
+ description: Whether branch cache is enabled or not.
+ type: bool
+ sample: false
+ is_continuous_availability_enabled:
+ description: Whether the share will be available continuously or not.
+ type: bool
+ sample: false
+ is_encryption_enabled:
+ description: Whether encryption is enabled or not.
+ type: bool
+ sample: false
+ umask:
+ description: Unix mask for the SMB share.
+ type: str
+ sample: {
+ "creation_time": "2022-03-17 11:56:54.867000+00:00",
+ "description": "",
+ "existed": true,
+ "export_paths": [
+ "\\\\multi-prot-pie.extreme1.com\\multi-prot-hui",
+ "\\\\10.230.24.26\\multi-prot-hui"
+ ],
+ "filesystem": {
+ "UnityFileSystem": {
+ "hash": 8748426746492
+ }
+ },
+ "filesystem_id": "fs_140",
+ "filesystem_name": "multi-prot-hui",
+ "hash": 8748426746588,
+ "id": "SMBShare_20",
+ "is_abe_enabled": false,
+ "is_ace_enabled": false,
+ "is_branch_cache_enabled": false,
+ "is_continuous_availability_enabled": false,
+ "is_dfs_enabled": false,
+ "is_encryption_enabled": false,
+ "is_read_only": null,
+ "modified_time": "2022-03-17 11:56:54.867000+00:00",
+ "name": "multi-prot-hui",
+ "nas_server_id": "nas_5",
+ "nas_server_name": "multi-prot",
+ "offline_availability": "CifsShareOfflineAvailabilityEnum.NONE",
+ "path": "/",
+ "snap": null,
+ "type": "CIFSTypeEnum.CIFS_SHARE",
+ "umask": "022"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('smbshare')
+
+application_type = "Ansible/1.6.0"
+
+
+class SMBShare(object):
+ """Class with SMB Share operations"""
+
+ def __init__(self):
+ """ Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_smb_share_parameters())
+
+ # initialize the ansible module
+ mut_ex_args = [['share_name', 'share_id'],
+ ['nas_server_name', 'nas_server_id'],
+ ['filesystem_name', 'snapshot_name',
+ 'filesystem_id', 'snapshot_id'],
+ ['share_id', 'nas_server_name'],
+ ['share_id', 'nas_server_id'],
+ ['share_id', 'filesystem_name'],
+ ['share_id', 'filesystem_id'],
+ ['share_id', 'path'],
+ ['share_id', 'snapshot_name'],
+ ['share_id', 'snapshot_id']]
+ required_one_of = [['share_id', 'share_name']]
+
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mut_ex_args,
+ required_one_of=required_one_of
+ )
+ utils.ensure_required_libs(self.module)
+
+ # result is a dictionary that contains changed status and
+ # snapshot details
+ self.result = {"changed": False,
+ 'smb_share_details': {}}
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+ self.smb_share_conn_obj = utils.cifs_share.UnityCifsShare(
+ self.unity_conn)
+ LOG.info('Connection established with the Unity Array')
+
+ def get_offline_availability_enum(self, offline_availability):
+ """
+ Get the enum of the Offline Availability parameter.
+ :param offline_availability: The offline_availability string
+ :return: offline_availability enum
+ """
+ if offline_availability in \
+ utils.CifsShareOfflineAvailabilityEnum.__members__:
+ return utils.CifsShareOfflineAvailabilityEnum[
+ offline_availability]
+ else:
+ error_msg = "Invalid value {0} for offline availability" \
+ " provided".format(offline_availability)
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
    def get_smb_share_obj(self, share_id=None, share_name=None,
                          filesystem_obj=None, snap_obj=None, nas_obj=None):
        """Get SMB share details.

        Lookup precedence: share_id first; otherwise share_name combined
        with exactly one scoping resource (filesystem, snapshot or NAS
        server), because a share name alone is not unique across NAS
        servers.

        :param share_id: ID of the SMB share (unique across the array).
        :param share_name: Name of the SMB share.
        :param filesystem_obj: Filesystem object used to scope a name lookup.
        :param snap_obj: Snapshot object used to scope a name lookup.
        :param nas_obj: NAS server object used to scope a name lookup.
        :return: SMB share object, or None when the resource is not found.
            Fails the module on auth errors, unexpected errors, or when a
            name is given without any scoping resource.
        """
        msg = "Failed to get details of SMB Share {0} with error {1} "
        # Prefer the name in error messages when both identifiers exist.
        smb_share = share_name if share_name else share_id
        try:
            if share_id:
                obj_smb = self.unity_conn.get_cifs_share(_id=share_id)
                # An ID lookup can return a stub; only trust it if existed.
                # Falls through (implicit None) when the ID is unknown.
                if obj_smb and obj_smb.existed:
                    LOG.info("Successfully got the SMB share "
                             "object %s ", obj_smb)
                    return obj_smb

            elif share_name is not None and filesystem_obj:
                # There might be a case where SMB share with same name exists
                # for different nas server. Hence, filesystem_obj is passed
                # along with share name to get a unique resource.
                return self.unity_conn.get_cifs_share(
                    name=share_name, filesystem=filesystem_obj)

            elif share_name is not None and snap_obj:
                # There might be a case where SMB share with same name exists
                # for different nas server. Hence, snap_obj is passed
                # along with share name to get a unique resource.
                return self.unity_conn.get_cifs_share(
                    name=share_name, snap=snap_obj)

            # This elif is addressing scenario where nas server details is
            # passed and neither filesystem nor snapshot details are passed.
            elif share_name is not None and nas_obj:
                # Multiple smb shares can be received, as only name is passed
                smb_share_obj = self.unity_conn.get_cifs_share(
                    name=share_name)

                # Checking if instance or list of instance is returned.
                if isinstance(smb_share_obj,
                              utils.cifs_share.UnityCifsShareList):
                    LOG.info("Multiple SMB share with same name found.")
                    smb_share_obj_list = smb_share_obj

                    # Pick the share whose filesystem lives on the given
                    # NAS server; None when no candidate matches.
                    for smb_share in smb_share_obj_list:
                        if smb_share.filesystem.nas_server == nas_obj:
                            return smb_share

                    msg = "No SMB share found with the given NAS Server." \
                          " Please provide correct share name and" \
                          " nas server details."
                    return None

                # Below statements will execute when there is only single
                # smb share returned.
                if smb_share_obj.filesystem.nas_server == nas_obj:
                    return smb_share_obj
                msg = "No SMB share found with the given NAS Server." \
                      " Please provide correct share name and" \
                      " nas server details."
                return None

            else:
                # share_name given without any scoping resource: ambiguous.
                self.module.fail_json(
                    msg="Share Name is Passed. Please enter Filesystem/"
                        "Snapshot/NAS Server Resource along with share_name"
                        " to get the details of the SMB share")

        except utils.HttpError as e:
            # 401 is reported as a credential problem; other HTTP errors
            # fail with the generic lookup message.
            if e.http_status == 401:
                cred_err = "Incorrect username or password , {0}".format(
                    e.message)
                self.module.fail_json(msg=cred_err)
            else:
                err_msg = msg.format(smb_share, str(e))
                LOG.error(err_msg)
                self.module.fail_json(msg=err_msg)

        except utils.UnityResourceNotFoundError as e:
            # Not-found is a normal outcome for idempotent flows: log and
            # return None instead of failing the task.
            err_msg = msg.format(smb_share, str(e))
            LOG.error(err_msg)
            return None

        except Exception as e:
            err_msg = msg.format(smb_share, str(e))
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)
+
+ def create_smb_share(self, share_name, path, filesystem_obj=None,
+ snapshot_obj=None, description=None,
+ is_abe_enabled=None, is_branch_cache_enabled=None,
+ is_continuous_availability_enabled=None,
+ is_encryption_enabled=None,
+ offline_availability=None, umask=None):
+ """
+ Create SMB Share
+ :return: SMB Share Object if successful, else error.
+ """
+ if path is None or path == "":
+ self.module.fail_json(msg="Please enter a valid path."
+ " Empty string or None provided.")
+ if not filesystem_obj and not snapshot_obj:
+ self.module.fail_json(msg="Either Filesystem or Snapshot "
+ "Resource's Name/ID is required to"
+ " Create a SMB share")
+ try:
+ if filesystem_obj:
+ return self.smb_share_conn_obj.create(
+ cli=self.unity_conn._cli, name=share_name,
+ fs=filesystem_obj, path=path,
+ is_encryption_enabled=is_encryption_enabled,
+ is_con_avail_enabled=is_continuous_availability_enabled,
+ is_abe_enabled=is_abe_enabled,
+ is_branch_cache_enabled=is_branch_cache_enabled,
+ umask=umask, description=description,
+ offline_availability=offline_availability)
+ else:
+ return self.smb_share_conn_obj.create_from_snap(
+ cli=self.unity_conn._cli, name=share_name,
+ snap=snapshot_obj, path=path,
+ is_encryption_enabled=is_encryption_enabled,
+ is_con_avail_enabled=is_continuous_availability_enabled,
+ is_abe_enabled=is_abe_enabled,
+ is_branch_cache_enabled=is_branch_cache_enabled,
+ umask=umask, description=description,
+ offline_availability=offline_availability)
+
+ except Exception as e:
+ error_msg = "Failed to create SMB share" \
+ " %s with error %s" % (share_name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_filesystem(self, filesystem_id=None, filesystem_name=None,
+ nas_server_obj=None):
+ """
+ Get the Filesystem Object.
+ :param filesystem_id: ID of the Filesystem.
+ :param filesystem_name: Name of the filesystem.
+ :param nas_server_obj: NAS Server object.
+ :return: Object of the filesystem.
+ """
+ try:
+ if filesystem_id:
+ obj_fs = self.unity_conn.get_filesystem(_id=filesystem_id)
+ if obj_fs and obj_fs.existed:
+ LOG.info("Successfully got the filesystem "
+ "object %s ", obj_fs)
+ return obj_fs
+ else:
+ return self.unity_conn.get_filesystem(
+ name=filesystem_name, nas_server=nas_server_obj)
+ return None
+ except Exception as e:
+ filesystem = filesystem_name if filesystem_name \
+ else filesystem_id
+ err_msg = "Failed to get filesystem details {0} with" \
+ " error {1}".format(filesystem, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ def get_snapshot(self, snapshot_name, snapshot_id):
+ """
+ Get the Snapshot Object.
+ :param snapshot_id: ID of the Snapshot.
+ :param snapshot_name: Name of the Snapshot
+ :return: Object of the filesystem.
+ """
+ try:
+ obj_snap = self.unity_conn.get_snap(_id=snapshot_id,
+ name=snapshot_name)
+ if snapshot_id and obj_snap and not obj_snap.existed:
+ LOG.info("Snapshot object does not exist %s ", obj_snap)
+ return None
+ return obj_snap
+ except Exception as e:
+ snapshot = snapshot_name if snapshot_name else snapshot_id
+ err_msg = "Failed to get filesystem snapshots details {0} with" \
+ " error {1}".format(snapshot, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ def get_nas_server(self, nas_server_name, nas_server_id):
+ """
+ Get the NAS Server Object using NAME/ID of the NAS Server.
+ :param nas_server_name: Name of the NAS Server
+ :param nas_server_id: ID of the NAS Server
+ :return: NAS Server object.
+ """
+ nas_server = nas_server_name if nas_server_name else nas_server_id
+ try:
+ obj_nas = self.unity_conn.get_nas_server(_id=nas_server_id,
+ name=nas_server_name)
+ if nas_server_id and obj_nas and not obj_nas.existed:
+ LOG.info("NAS Server object does not exist %s ", obj_nas)
+ return None
+ return obj_nas
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ cred_err = "Incorrect username or password , {0}".format(
+ e.message)
+ self.module.fail_json(msg=cred_err)
+ else:
+ err_msg = "Failed to get details of NAS Server" \
+ " {0} with error {1}".format(nas_server, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+ except Exception as e:
+ nas_server = nas_server_name if nas_server_name \
+ else nas_server_id
+ err_msg = "Failed to get nas server details {0} with" \
+ " error {1}".format(nas_server, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ def delete_smb_share(self, smb_share_obj):
+ """
+ Delete SMB share if exists, else thrown error.
+ """
+ try:
+ smb_share_obj.delete()
+ except Exception as e:
+ error_msg = "Failed to Delete SMB share" \
+ " %s with error %s" % (smb_share_obj.name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def to_update(self, smb_share_obj):
+ LOG.info("Checking Whether the parameters are modified or not.")
+
+ offline_availability = self.module.params['offline_availability']
+ # Get the enum for the corresponding offline_availability
+ if offline_availability:
+ offline_availability = \
+ self.get_offline_availability_enum(offline_availability)
+ if offline_availability is not None and \
+ offline_availability != smb_share_obj.offline_availability:
+ return True
+
+ smb_share_dict = smb_share_obj._get_properties()
+ params_list = ['is_abe_enabled', 'is_branch_cache_enabled',
+ 'is_continuous_availability_enabled',
+ 'is_encryption_enabled', 'description', 'umask']
+ for param in params_list:
+ if self.module.params[param] is not None and \
+ self.module.params[param] != smb_share_dict[param]:
+ return True
+ return False
+
+ def update_smb_share(self, smb_share_obj, is_encryption_enabled=None,
+ is_continuous_availability_enabled=None,
+ is_abe_enabled=None,
+ is_branch_cache_enabled=None,
+ umask=None, description=None,
+ offline_availability=None):
+ """
+ The Details of the SMB share will be updated in the function.
+ """
+ try:
+ smb_share_obj.modify(
+ is_encryption_enabled=is_encryption_enabled,
+ is_con_avail_enabled=is_continuous_availability_enabled,
+ is_abe_enabled=is_abe_enabled,
+ is_branch_cache_enabled=is_branch_cache_enabled,
+ umask=umask, description=description,
+ offline_availability=offline_availability)
+
+ except Exception as e:
+ error_msg = "Failed to Update parameters of SMB share" \
+ " %s with error %s" % (smb_share_obj.name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
    def perform_module_operation(self):
        """
        Perform different actions on SMB share based on user parameters
        chosen in playbook

        Flow: validate inputs, resolve NAS server / filesystem / snapshot,
        locate the share, then create, update or delete it and exit with
        the final share facts.
        """
        state = self.module.params['state']
        share_name = self.module.params['share_name']
        filesystem_name = self.module.params['filesystem_name']
        snapshot_name = self.module.params['snapshot_name']
        nas_server_name = self.module.params['nas_server_name']
        share_id = self.module.params['share_id']
        filesystem_id = self.module.params['filesystem_id']
        snapshot_id = self.module.params['snapshot_id']
        nas_server_id = self.module.params['nas_server_id']
        path = self.module.params['path']

        description = self.module.params['description']
        is_branch_cache_enabled = \
            self.module.params['is_branch_cache_enabled']
        is_continuous_availability_enabled = \
            self.module.params['is_continuous_availability_enabled']
        is_encryption_enabled = self.module.params['is_encryption_enabled']
        is_abe_enabled = self.module.params['is_abe_enabled']
        umask = self.module.params['umask']

        offline_availability = self.module.params['offline_availability']
        # Get the enum for the corresponding offline_availability
        if offline_availability:
            offline_availability = \
                self.get_offline_availability_enum(offline_availability)

        changed = False
        '''
        Validate parameters.
        '''
        # Reject empty/whitespace-only share IDs before touching the array.
        if share_id is not None and \
                (share_id == "" or len(share_id.split()) == 0):
            self.module.fail_json(msg="Invalid share id provided."
                                      " Please enter a valid share ID.")

        '''
        Get details of NAS Server, if entered.
        '''
        nas_server_obj = None
        if nas_server_name or nas_server_id:
            nas_server_obj = self.get_nas_server(nas_server_name,
                                                 nas_server_id)
            if nas_server_obj:
                msg = "NAS Server Object:" \
                      " {0}".format(nas_server_obj._get_properties())
                LOG.info(msg)
            else:
                msg = "NAS Server Resource not fetched."
                LOG.info(msg)

        '''
        Get details of Filesystem, if entered.
        '''
        filesystem_obj = None
        if filesystem_id:
            filesystem_obj = self.get_filesystem(filesystem_id)
        if filesystem_name:
            # nas_server_obj is required to uniquely identify filesystem
            # resource. If neither nas_server_name nor nas_server_id
            # is passed along with filesystem_name then error is thrown.
            if not nas_server_obj:
                self.module.fail_json(msg="nas_server_id/nas_server_name is "
                                          "required when filesystem_name is "
                                          "passed")
            filesystem_obj = self.get_filesystem(
                None, filesystem_name, nas_server_obj)
        if filesystem_obj:
            msg = "Filesystem Object:" \
                  " {0}".format(filesystem_obj._get_properties())
            LOG.info(msg)
        # Checking if filesystem supports SMB protocol or not.
        if filesystem_obj and \
                filesystem_obj.supported_protocols.name == "NFS":
            self.module.fail_json(msg="Cannot perform SMB share operations "
                                      "as file system supports only NFS "
                                      "protocol. Please enter a valid "
                                      "Filesystem having supported protocol"
                                      " as SMB or Multiprotocol.")
        '''
        Get details of Snapshot, if entered.
        '''
        snapshot_obj = None
        if snapshot_id or snapshot_name:
            # Snapshot Name and Snapshot ID both are unique across array.
            # Hence no need to mention nas server details
            snapshot_obj = self.get_snapshot(snapshot_name, snapshot_id)
            if snapshot_obj:
                msg = "Snapshot Object:" \
                      " {0}".format(snapshot_obj._get_properties())
                LOG.info(msg)
            else:
                msg = "Snapshot Resource not fetched."
                LOG.info(msg)

        '''
        Get the Details of the SMB Share
        '''
        smb_share_obj = self.get_smb_share_obj(
            share_id, share_name, filesystem_obj, snapshot_obj,
            nas_server_obj)
        if smb_share_obj:
            msg = "SMB Share Object:" \
                  " {0}".format(smb_share_obj._get_properties())
            LOG.info(msg)
        elif state == 'present' and share_id:
            # An explicit ID that resolves to nothing is treated as user
            # error rather than a create request.
            msg = "Unable to fetch SMB Share Resource. " \
                  "Incorrect SMB share id provided. " \
                  "Please enter a correct share id."
            LOG.error(msg)
            self.module.fail_json(msg=msg)

        '''
        Creation of SMB Share
        '''
        if state == "present" and not smb_share_obj:
            smb_share_obj = self.create_smb_share(
                share_name, path, filesystem_obj, snapshot_obj, description,
                is_abe_enabled, is_branch_cache_enabled,
                is_continuous_availability_enabled, is_encryption_enabled,
                offline_availability, umask)
            changed = True

        '''
        Update the SMB share details
        '''
        # NOTE: this also runs right after creation; to_update() then
        # decides whether a modify call is actually needed.
        if state == "present" and smb_share_obj:
            LOG.info("Modify the details of the SMB share.")
            update_flag = self.to_update(smb_share_obj)
            msg = "Update Flag: {0}".format(str(update_flag))
            LOG.info(msg)
            if update_flag:
                self.update_smb_share(smb_share_obj, is_encryption_enabled,
                                      is_continuous_availability_enabled,
                                      is_abe_enabled,
                                      is_branch_cache_enabled,
                                      umask, description,
                                      offline_availability)
                changed = True

        '''
        Delete the SMB share details
        '''
        if state == "absent" and smb_share_obj:
            self.delete_smb_share(smb_share_obj)
            changed = True

        '''
        Update the changed state and SMB share details
        '''

        self.result["changed"] = changed
        smb_details = self.prepare_output_dict(state, share_id, share_name,
                                               filesystem_obj, snapshot_obj,
                                               nas_server_obj)
        self.result["smb_share_details"] = smb_details
        self.module.exit_json(**self.result)
+
+ def prepare_output_dict(self, state, share_id, share_name,
+ filesystem_obj, snapshot_obj, nas_server_obj):
+ smb_share_details = None
+ smb_share_obj = None
+ if state == 'present':
+ smb_share_obj = self.get_smb_share_obj(
+ share_id, share_name, filesystem_obj,
+ snapshot_obj, nas_server_obj)
+ smb_share_details = smb_share_obj._get_properties()
+ if smb_share_details:
+ # Get Snapshot NAME and ID if SMB share exists for Snapshot
+ if smb_share_obj.type.name == "CIFS_SNAPSHOT":
+ smb_share_details['snapshot_name'] = smb_share_obj.snap.name
+ smb_share_details['snapshot_id'] = smb_share_obj.snap.id
+
+ # Get Filesystem NAME and ID
+ smb_share_details['filesystem_name'] = \
+ smb_share_obj.filesystem.name
+ smb_share_details['filesystem_id'] = smb_share_obj.filesystem.id
+
+ # Get NAS server NAME and ID
+ smb_share_details['nas_server_name'] = \
+ smb_share_obj.filesystem.nas_server.name
+ smb_share_details['nas_server_id'] = \
+ smb_share_obj.filesystem.nas_server.id
+ return smb_share_details
+
+
def get_smb_share_parameters():
    """
    Build the argument spec for the Unity SMB share Ansible module.

    All identifier and free-text options default to plain string
    parameters; the feature toggles are booleans and state is mandatory.
    """
    spec = {
        'share_name': {}, 'share_id': {},
        'filesystem_name': {}, 'filesystem_id': {},
        'snapshot_name': {}, 'snapshot_id': {},
        'nas_server_name': {}, 'nas_server_id': {},
        # path may contain sensitive directory names, keep it out of logs
        'path': {'no_log': True}, 'umask': {}, 'description': {},
        'offline_availability': {
            'choices': ["MANUAL", "DOCUMENTS", "PROGRAMS", "NONE"]},
        'is_abe_enabled': {'type': 'bool'},
        'is_branch_cache_enabled': {'type': 'bool'},
        'is_continuous_availability_enabled': {'type': 'bool'},
        'is_encryption_enabled': {'type': 'bool'},
        'state': {'required': True, 'choices': ['present', 'absent'],
                  'type': 'str'},
    }
    return spec
+
+
def main():
    """ Create Unity SMB share object and perform action on it
    based on user input from playbook"""
    SMBShare().perform_module_operation()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/snapshot.py b/ansible_collections/dellemc/unity/plugins/modules/snapshot.py
new file mode 100644
index 000000000..c8aba1846
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/snapshot.py
@@ -0,0 +1,751 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+""" Ansible module for managing Snapshots on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: snapshot
+short_description: Manage snapshots on the Unity storage system
+description:
+- Managing snapshots on the Unity storage system includes create snapshot,
+ delete snapshot, update snapshot, get snapshot, map host and unmap host.
+version_added: '1.1.0'
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com>
+options:
+ snapshot_name:
+ description:
+ - The name of the snapshot.
+ - Mandatory parameter for creating a snapshot.
+ - For all other operations either I(snapshot_name) or I(snapshot_id) is
+ required.
+ type: str
+ vol_name:
+ description:
+ - The name of the volume for which snapshot is created.
+ - For creation of a snapshot either I(vol_name) or I(cg_name) is required.
+ - Not required for other operations.
+ type: str
+ cg_name:
+ description:
+ - The name of the Consistency Group for which snapshot is created.
+ - For creation of a snapshot either I(vol_name) or I(cg_name) is required.
+ - Not required for other operations.
+ type: str
+ snapshot_id:
+ description:
+ - The id of the snapshot.
+ - For all operations other than creation either I(snapshot_name) or
+ I(snapshot_id) is required.
+ type: str
+ auto_delete:
+ description:
+ - This option specifies whether the snapshot is auto deleted or not.
+ - If set to C(true), snapshot will expire based on the pool auto deletion
+ policy.
    - If set to C(false), snapshot will not be auto deleted
      based on the pool auto deletion policy.
+ - Option I(auto_delete) can not be set to C(true), if I(expiry_time) is specified.
+ - If during creation neither I(auto_delete) nor I(expiry_time) is mentioned
+ then snapshot will be created keeping I(auto_delete) as C(true).
+ - Once the I(expiry_time) is set then snapshot cannot be assigned
+ to the auto delete policy.
+ type: bool
+ expiry_time:
+ description:
+ - This option is for specifying the date and time after which the
+ snapshot will expire.
+ - The time is to be mentioned in UTC timezone.
+ - The format is "MM/DD/YYYY HH:MM". Year must be in 4 digits.
+ type: str
+ description:
+ description:
+ - The additional information about the snapshot can be provided using
+ this option.
+ type: str
+ new_snapshot_name:
+ description:
+ - New name for the snapshot.
+ type: str
+ state:
+ description:
+ - The I(state) option is used to mention the existence of
+ the snapshot.
+ type: str
+ required: true
+ choices: [ 'absent', 'present' ]
+ host_name:
+ description:
+ - The name of the host.
+ - Either I(host_name) or I(host_id) is required to map or unmap a snapshot from
+ a host.
+ - Snapshot can be attached to multiple hosts.
+ type: str
+ host_id:
+ description:
+ - The id of the host.
+ - Either I(host_name) or I(host_id) is required to map or unmap a snapshot from
+ a host.
+ - Snapshot can be attached to multiple hosts.
+ type: str
+ host_state:
+ description:
+ - The I(host_state) option is used to mention the existence of the host
+ for snapshot.
+ - It is required when a snapshot is mapped or unmapped from host.
+ type: str
+ choices: ['mapped', 'unmapped']
+
+notes:
+ - The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+ - name: Create a Snapshot for a CG
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ cg_name: "{{cg_name}}"
+ snapshot_name: "{{cg_snapshot_name}}"
+ description: "{{description}}"
+ auto_delete: False
+ state: "present"
+
+ - name: Create a Snapshot for a volume with Host attached
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ vol_name: "{{vol_name}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ description: "{{description}}"
+ expiry_time: "04/15/2025 16:30"
+ host_name: "{{host_name}}"
+ host_state: "mapped"
+ state: "present"
+
+ - name: Unmap a host for a Snapshot
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ host_name: "{{host_name}}"
+ host_state: "unmapped"
+ state: "present"
+
+ - name: Map snapshot to a host
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ host_name: "{{host_name}}"
+ host_state: "mapped"
+ state: "present"
+
+ - name: Update attributes of a Snapshot for a volume
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ new_snapshot_name: "{{new_snapshot_name}}"
+ description: "{{new_description}}"
+ host_name: "{{host_name}}"
+ host_state: "unmapped"
+ state: "present"
+
+ - name: Delete Snapshot of CG
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "{{cg_snapshot_name}}"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+
+snapshot_details:
+ description: Details of the snapshot.
+ returned: When snapshot exists
+ type: dict
+ contains:
+ is_auto_delete:
+ description: Additional information mentioned for snapshot.
+ type: str
+ expiration_time:
+ description: Date and time after which the snapshot
+ will expire.
+ type: str
+ hosts_list:
+ description: Contains the name and id of the associated
+ hosts.
+ type: dict
+ id:
+ description: Unique identifier of the snapshot instance.
+ type: str
+ name:
+ description: The name of the snapshot.
+ type: str
+ storage_resource_name:
+ description: Name of the storage resource for which the
+ snapshot exists.
+ type: str
+ storage_resource_id:
+ description: Id of the storage resource for which the snapshot
+ exists.
+ type: str
+ sample: {
+ "access_type": null,
+ "attached_wwn": null,
+ "creation_time": "2022-10-21 08:20:25.803000+00:00",
+ "creator_schedule": null,
+ "creator_type": "SnapCreatorTypeEnum.USER_CUSTOM",
+ "creator_user": {
+ "id": "user_admin"
+ },
+ "description": "Test snap creation",
+ "existed": true,
+ "expiration_time": null,
+ "hash": 8756689457056,
+ "hosts_list": [],
+ "id": "85899355291",
+ "io_limit_policy": null,
+ "is_auto_delete": true,
+ "is_modifiable": false,
+ "is_modified": false,
+ "is_read_only": true,
+ "is_system_snap": false,
+ "last_writable_time": null,
+ "lun": null,
+ "name": "ansible_snap_cg_1_1",
+ "parent_snap": null,
+ "size": null,
+ "snap_group": null,
+ "state": "SnapStateEnum.READY",
+ "storage_resource_id": "res_95",
+ "storage_resource_name": "CG_ansible_test_2_new"
+ }
+'''
+
+import logging
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+from datetime import datetime
+
+LOG = utils.get_logger('snapshot')
+
+application_type = "Ansible/1.6.0"
+
+
class Snapshot(object):
    """Class with Snapshot operations"""

    def __init__(self):
        """ Define all parameters required by this module"""

        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_snapshot_parameters())

        # A snapshot is addressed by exactly one of name/id, created for
        # one of volume/CG, and mapped using at most one of host name/id.
        mutually_exclusive = [['snapshot_name', 'snapshot_id'],
                              ['vol_name', 'cg_name'],
                              ['host_name', 'host_id']]

        required_one_of = [['snapshot_name', 'snapshot_id']]
        # initialize the ansible module
        self.module = AnsibleModule(argument_spec=self.module_params,
                                    supports_check_mode=False,
                                    mutually_exclusive=mutually_exclusive,
                                    required_one_of=required_one_of)
        utils.ensure_required_libs(self.module)

        # result is a dictionary that contains changed status and
        # snapshot details
        self.result = {"changed": False,
                       'snapshot_details': {}}

        self.unity_conn = utils.get_unity_unisphere_connection(
            self.module.params, application_type)
        self.snap_obj = utils.snap.UnitySnap(self.unity_conn)
        LOG.info('Connection established with the Unity Array')

    def validate_expiry_time(self, expiry_time):
        """Validates the specified expiry_time

        Fails the module unless expiry_time parses as "MM/DD/YYYY HH:MM".
        """
        try:
            datetime.strptime(expiry_time, '%m/%d/%Y %H:%M')
        except ValueError:
            error_msg = "expiry_time not in MM/DD/YYYY HH:MM format"
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def to_update(self, snapshot, new_name=None, description=None,
                  auto_del=None, expiry_time=None, host=None,
                  host_state=None):
        """Determines whether to update the snapshot or not

        Also enforces the mutual exclusion between expiry_time and the
        auto-delete policy, failing the module when they conflict.
        """
        # If the snapshot has is_auto_delete True,
        # Check if auto_delete in the input is either None or True
        if expiry_time and snapshot.is_auto_delete and \
                (auto_del is None or auto_del):
            self.module.fail_json(msg="expiry_time can be assigned "
                                      "when auto delete is False")
        if auto_del and snapshot.expiration_time:
            error_msg = "expiry_time for snapshot is set." \
                        " Once it is set then snapshot cannot" \
                        " be assigned to auto_delete policy"
            self.module.fail_json(msg=error_msg)
        if new_name and new_name != snapshot.name:
            return True
        if description and description != snapshot.description:
            return True
        if auto_del and auto_del != snapshot.is_auto_delete:
            return True
        # to_update_expiry_time / to_update_host_list are module-level
        # helpers defined elsewhere in this file.
        if to_update_expiry_time(snapshot, expiry_time):
            return True
        if host and to_update_host_list(snapshot, host, host_state):
            return True
        return False

    def update_snapshot(self, snapshot, new_name=None,
                        description=None, auto_del=None, expiry_time=None,
                        host_access_list=None):
        """Modify the snapshot's name, description, auto-delete flag,
        retention and host access; fail the module on any error."""
        try:
            duration = None
            if expiry_time:
                # Convert the absolute timestamp into a retention duration
                # relative to the array's current system time.
                duration = convert_timestamp_to_sec(
                    expiry_time, self.unity_conn.system_time)
                if duration and duration <= 0:
                    self.module.fail_json(msg="expiry_time should be after"
                                              " the current system time")
            snapshot.modify(name=new_name, retentionDuration=duration,
                            isAutoDelete=auto_del, description=description,
                            hostAccess=host_access_list)
            snapshot.update()
        except Exception as e:
            error_msg = "Failed to modify snapshot" \
                        " [name: %s , id: %s] with error %s"\
                        % (snapshot.name, snapshot.id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def create_snapshot(self, snap_name, storage_id, description=None,
                        auto_del=None, expiry_time=None):
        """Create a snapshot of the given storage resource (volume or CG)
        and return the new snapshot object."""
        try:
            duration = None
            if expiry_time:
                duration = convert_timestamp_to_sec(
                    expiry_time, self.unity_conn.system_time)
                if duration <= 0:
                    self.module.fail_json(msg="expiry_time should be after"
                                              " the current system time")
            snapshot = self.snap_obj.create(
                cli=self.unity_conn._cli, storage_resource=storage_id,
                name=snap_name, description=description,
                is_auto_delete=auto_del, retention_duration=duration)
            return snapshot
        except Exception as e:
            error_msg = "Failed to create snapshot" \
                        " %s with error %s" % (snap_name, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def delete_snapshot(self, snapshot):
        """Delete the snapshot and return None on success.

        get_hosts_dict is a module-level helper defined elsewhere in this
        file; when no hosts are attached, detach_from(None) is called first
        - presumably to clear default/implicit access before deletion
        (TODO confirm against storops behaviour).
        """
        try:
            if not bool(get_hosts_dict(snapshot)):
                snapshot.detach_from(None)
                snapshot.delete()
            else:
                snapshot.delete()
            return None

        except Exception as e:
            error_msg = "Failed to delete snapshot" \
                        " [name: %s, id: %s] with error %s" \
                        % (snapshot.name, snapshot.id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def get_snapshot_obj(self, name=None, id=None):
        """Fetch a snapshot by name or id.

        Returns None for a not-found resource; fails the module on
        credential or unexpected errors.
        """
        snapshot = id if id else name
        msg = "Failed to get details of snapshot %s with error %s "
        try:
            return self.unity_conn.get_snap(name=name, _id=id)

        except utils.HttpError as e:
            # 401 is reported as a credential problem.
            if e.http_status == 401:
                cred_err = "Incorrect username or password , {0}".format(
                    e.message)
                self.module.fail_json(msg=cred_err)
            else:
                err_msg = msg % (snapshot, str(e))
                LOG.error(err_msg)
                self.module.fail_json(msg=err_msg)

        except utils.UnityResourceNotFoundError as e:
            # Not-found is a normal outcome for idempotent flows.
            err_msg = msg % (snapshot, str(e))
            LOG.error(err_msg)
            return None

        except Exception as e:
            err_msg = msg % (snapshot, str(e))
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)

    def get_volume_obj(self, name):
        """Fetch a LUN (volume) by name; fail the module on error."""
        try:
            return self.unity_conn.get_lun(name=name)
        except Exception as e:
            error_msg = "Failed to get volume %s with error %s"\
                        % (name, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def get_cg_obj(self, name):
        """Fetch a consistency group by name; fail the module on error."""
        try:
            return self.unity_conn.get_cg(name=name)
        except Exception as e:
            error_msg = "Failed to get cg %s with error %s" % (name, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def get_host_obj(self, name=None, id=None):
        """ Get the Host object"""
        try:
            return self.unity_conn.get_host(name=name, _id=id)
        except Exception as e:
            host = id if id else name
            error_msg = "Failed to get host %s with error %s"\
                        % (host, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def attach_to_snap(self, snapshot, host):
        """ Attach snapshot to a host """
        try:
            # When no hosts are attached yet, clear any default access
            # first - presumably required before the first attach (TODO
            # confirm against storops behaviour).
            if not get_hosts_dict(snapshot):
                snapshot.detach_from(None)
            snapshot.attach_to(host)
            snapshot.update()
        except Exception as e:
            error_msg = "Failed to attach snapshot [name: %s, id: %s]" \
                        " to host [%s, %s] with error %s"\
                        % (snapshot.name, snapshot.id,
                           host.name, host.id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def perform_module_operation(self):
        """
        Perform different actions on snapshot module based on parameters
        chosen in playbook

        Flow: resolve the snapshot and its storage resource/host, validate
        parameter combinations, then create/update/delete and exit with
        the final snapshot facts.
        """
        snapshot_name = self.module.params['snapshot_name']
        snapshot_id = self.module.params['snapshot_id']
        vol_name = self.module.params['vol_name']
        cg_name = self.module.params['cg_name']
        auto_delete = self.module.params['auto_delete']
        expiry_time = self.module.params['expiry_time']
        description = self.module.params['description']
        new_snapshot_name = self.module.params['new_snapshot_name']
        host_name = self.module.params['host_name']
        host_id = self.module.params['host_id']
        host_state = self.module.params['host_state']
        state = self.module.params['state']
        host = None
        storage_resource = None
        changed = False

        LOG.info("Getting Snapshot details")
        snapshot = self.get_snapshot_obj(name=snapshot_name, id=snapshot_id)

        # Normalize a non-existent stub to None so later logic can simply
        # test truthiness.
        if snapshot and not snapshot.existed:
            snapshot = None
        msg = "snapshot details: %s" % str(snapshot)
        LOG.info(msg)

        # Get Volume Object
        if vol_name is not None:
            if vol_name == "" or vol_name.isspace():
                self.module.fail_json(msg="Invalid vol_name given, Please"
                                          " provide a valid vol_name")
            storage_resource = self.get_volume_obj(name=vol_name)

        # Get Consistency Group Object
        if cg_name is not None:
            if cg_name == "" or cg_name.isspace():
                self.module.fail_json(msg="Invalid cg_name given, Please"
                                          " provide a valid cg_name")
            storage_resource = self.get_cg_obj(name=cg_name)

        # Get host object for volume snapshots
        if host_id or host_name:
            if cg_name:
                self.module.fail_json(msg="Mapping CG snapshot to host"
                                          " is not supported.")
            host = self.get_host_obj(name=host_name, id=host_id)

        # Check whether host_name or host_id is given in input
        # along with host_state
        if (host and not host_state) or (not host and host_state):
            self.module.fail_json(
                msg="Either host_name or host_id along with host_state "
                    "is required to map or unmap a snapshot from a host")

        # Check for error, if user tries to create a snapshot with the
        # same name for other storage resource.
        if snapshot and storage_resource and\
                (snapshot.storage_resource.id != storage_resource.id):
            self.module.fail_json(
                msg="Snapshot %s is of %s storage resource. Cannot create new"
                    " snapshot with same name for %s storage resource"
                    % (snapshot.name, snapshot.storage_resource.name,
                       storage_resource.name))

        # check for valid expiry_time
        if expiry_time is not None and \
                (expiry_time == "" or expiry_time.isspace()):
            self.module.fail_json(msg="Please provide valid expiry_time,"
                                      " empty expiry_time given")
        # Check if in input auto_delete is True and expiry_time is not None
        if expiry_time and auto_delete:
            error_msg = "Cannot set expiry_time if auto_delete given as True"
            LOG.info(error_msg)
            self.module.fail_json(msg=error_msg)

        # Check whether to modify the snapshot or not
        update_flag = False
        if snapshot:
            update_flag = self.to_update(snapshot,
                                         new_name=new_snapshot_name,
                                         description=description,
                                         auto_del=auto_delete,
                                         expiry_time=expiry_time,
                                         host=host, host_state=host_state)
        msg = "update_flag for snapshot %s" % str(update_flag)
        LOG.info(msg)

        # Create a Snapshot
        if not snapshot and state == "present":
            LOG.info("Creating a snapshot")
            if snapshot_id:
                self.module.fail_json(msg="Creation of Snapshot is allowed"
                                          " using snapshot_name only, "
                                          "snapshot_id given")
            if snapshot_name == "" or snapshot_name.isspace():
                self.module.fail_json(msg="snapshot_name is required for"
                                          " creation of a snapshot,"
                                          " empty snapshot_name given")
            if not storage_resource:
                self.module.fail_json(msg="vol_name or cg_name required to"
                                          " create a snapshot")

            if new_snapshot_name:
                self.module.fail_json(
                    msg="new_snapshot_name can not be assigned"
                        " during creation of a snapshot")

            snapshot = self.create_snapshot(snapshot_name,
                                            storage_resource.id,
                                            description, auto_delete,
                                            expiry_time)
            # Host mapping is a separate call after creation.
            if host and host_state == "mapped":
                self.attach_to_snap(snapshot, host)
            changed = True

        # Update the Snapshot
        if snapshot and state == "present" and update_flag:

            LOG.info("Updating the Snapshot details")

            if host_state == 'mapped':
                # Attach first, then apply the remaining attribute changes.
                self.attach_to_snap(snapshot, host)
                self.update_snapshot(
                    snapshot, new_name=new_snapshot_name,
                    description=description, auto_del=auto_delete,
                    expiry_time=expiry_time)

            elif host_state == 'unmapped':
                # Unmapping is done via a host access list passed to
                # modify(); create_host_access_list is a module-level
                # helper defined elsewhere in this file.
                host_access_list = create_host_access_list(snapshot,
                                                           host,
                                                           host_state)
                self.update_snapshot(
                    snapshot, new_name=new_snapshot_name,
                    description=description, auto_del=auto_delete,
                    expiry_time=expiry_time,
                    host_access_list=host_access_list)

            else:
                self.update_snapshot(
                    snapshot, new_name=new_snapshot_name,
                    description=description, auto_del=auto_delete,
                    expiry_time=expiry_time)
            changed = True

        # Delete the Snapshot
        if state == "absent" and snapshot:
            snapshot = self.delete_snapshot(snapshot)
            changed = True

        # Add snapshot details to the result.
        if snapshot:
            snapshot.update()
            self.result["snapshot_details"] = \
                create_snapshot_details_dict(snapshot)
        else:
            self.result["snapshot_details"] = {}

        self.result["changed"] = changed
        self.module.exit_json(**self.result)
+
+
def create_snapshot_details_dict(snapshot):
    """Build a JSON-friendly snapshot details dict: the raw storage
    resource and host access objects are replaced by their names/ids."""
    details = snapshot._get_properties()
    # Drop the raw embedded objects; they are replaced below with
    # serializable equivalents.
    for raw_key in ('storage_resource', 'host_access'):
        del details[raw_key]
    details['hosts_list'] = get_hosts_list(get_hosts_dict(snapshot))
    resource = snapshot.storage_resource
    details['storage_resource_name'] = resource.name
    details['storage_resource_id'] = resource.id
    return details
+
+
+def get_hosts_list(hosts_dict):
+ """ Get the host name and host id of all the associated hosts """
+ hosts_list = []
+ if not hosts_dict:
+ return hosts_list
+
+ for host in list(hosts_dict.keys()):
+ hosts_list.append(
+ {
+ "host_name": host.name,
+ "host_id": host.id
+ }
+ )
+ return hosts_list
+
+
def create_host_access_list(snapshot, host, host_state):
    """Build the host access list used when modifying the hosts mapped to
    a snapshot. Returns None when the mapping needs no list rewrite
    (no hosts attached, or the host is being mapped rather than
    unmapped); returns [] when no update is required at all."""
    current_hosts = get_hosts_dict(snapshot)
    # Snapshot currently has no hosts attached.
    if not current_hosts:
        return None
    if not to_update_host_list(snapshot, host, host_state):
        return []
    if host_state == "mapped":
        return None
    # Unmapping: keep every existing mapping except the targeted host.
    return [{'host': existing, 'allowedAccess': access}
            for existing, access in current_hosts.items()
            if existing != host]
+
+
def get_hosts_dict(snapshot):
    """Map each host attached to the snapshot to its allowed access
    level; empty dict when the snapshot has no host access entries."""
    LOG.info("Inside get_hosts_dict")
    if not snapshot.host_access:
        return {}
    return {access_obj.host: access_obj.allowed_access
            for access_obj in snapshot.host_access}
+
+
def to_update_host_list(snapshot, host, host_state):
    """Return True when the snapshot's host list must change: mapping a
    host that is not attached, or unmapping one that is."""
    attached = host in get_hosts_dict(snapshot)
    if host_state == "mapped" and not attached:
        return True
    if host_state == "unmapped" and attached:
        return True
    return False
+
+
def to_update_expiry_time(snapshot, expiry_time=None):
    """Return True when the requested expiry_time differs from the
    snapshot's current expiration time."""
    if not expiry_time:
        return False
    current = snapshot.expiration_time
    if current is None:
        # No expiration set yet, so any requested time is a change.
        return True
    return convert_timestamp_to_sec(expiry_time, current) != 0
+
+
def convert_timestamp_to_sec(expiry_time, snap_time):
    """Return the difference, in whole seconds, between the requested
    expiry time string ("MM/DD/YYYY HH:MM") and the snapshot's
    expiration timestamp."""
    fmt = '%m/%d/%Y %H:%M'
    # Round-trip the snapshot time through a string so both timestamps
    # are naive and truncated to minute precision before comparing.
    snap_at_minute = datetime.strptime(snap_time.strftime(fmt), fmt)
    requested = datetime.strptime(expiry_time, fmt)
    return int((requested - snap_at_minute).total_seconds())
+
+
def get_snapshot_parameters():
    """Return the argument spec for the Unity snapshot Ansible module."""
    spec = {}
    # Plain optional string parameters, in spec order.
    for param in ('snapshot_name', 'snapshot_id', 'vol_name', 'cg_name'):
        spec[param] = dict(required=False, type='str')
    spec['auto_delete'] = dict(required=False, type='bool')
    for param in ('expiry_time', 'description', 'new_snapshot_name',
                  'host_name', 'host_id'):
        spec[param] = dict(required=False, type='str')
    spec['host_state'] = dict(required=False, type='str',
                              choices=['mapped', 'unmapped'])
    spec['state'] = dict(required=True, type='str',
                         choices=['present', 'absent'])
    return spec
+
+
def main():
    """Entry point: run the Unity snapshot module against playbook
    input."""
    Snapshot().perform_module_operation()
+
+
# Invoke the module entry point only when executed as a script.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py b/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py
new file mode 100644
index 000000000..aba5524cd
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py
@@ -0,0 +1,1002 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing snapshot schedules on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: snapshotschedule
+version_added: '1.1.0'
+short_description: Manage snapshot schedules on Unity storage system
+description:
+- Managing snapshot schedules on Unity storage system includes
+ creating new snapshot schedule, getting details of snapshot schedule,
+ modifying attributes of snapshot schedule, and deleting snapshot schedule.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+
+options:
+ name:
+ description:
+ - The name of the snapshot schedule.
+ - Name is mandatory for a create operation.
+ - Specify either I(name) or I(id) (but not both) for any operation.
+ type: str
+ id:
+ description:
+ - The ID of the snapshot schedule.
+ type: str
+ type:
+ description:
+ - Type of the rule to be included in snapshot schedule.
+ - Type is mandatory for any create or modify operation.
+ - Once the snapshot schedule is created with one rule type, the type cannot be changed; only the other parameters of that rule type can be modified.
+ type: str
+ choices: ['every_n_hours', 'every_day', 'every_n_days', 'every_week',
+ 'every_month']
+ interval:
+ description:
+ - Number of hours between snapshots.
+ - Applicable only when rule type is C(every_n_hours).
+ type: int
+ hours_of_day:
+ description:
+ - Hours of the day when the snapshot will be taken.
+ - Applicable only when rule type is C(every_day).
+ type: list
+ elements: int
+ day_interval:
+ description:
+ - Number of days between snapshots.
+ - Applicable only when rule type is C(every_n_days).
+ type: int
+ days_of_week:
+ description:
+ - Days of the week for which the snapshot schedule rule applies.
+ - Applicable only when rule type is C(every_week).
+ type: list
+ elements: str
+ choices: ['SUNDAY', 'MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY',
+ 'FRIDAY', 'SATURDAY']
+ day_of_month:
+ description:
+ - Day of the month for which the snapshot schedule rule applies.
+ - Applicable only when rule type is C(every_month).
+ - Value should be [1, 31].
+ type: int
+ hour:
+ description:
+ - The hour when the snapshot will be taken.
+ - Applicable for C(every_n_days), C(every_week), C(every_month) rule types.
+ - For create operation, if I(hour) parameter is not specified, value will
+ be taken as C(0).
+ - Value should be [0, 23].
+ type: int
+ minute:
+ description:
+ - Minute offset from the hour when the snapshot will be taken.
+ - Applicable for all rule types.
+ - For a create operation, if I(minute) parameter is not specified, value will
+ be taken as C(0).
+ - Value should be [0, 59].
+ type: int
+ desired_retention:
+ description:
+ - The number of days/hours for which snapshot will be retained.
+ - When I(auto_delete) is C(true), I(desired_retention) cannot be specified.
+ - Maximum desired retention supported is 31 days or 744 hours.
+ type: int
+ retention_unit:
+ description:
+ - The retention unit for the snapshot.
+ default: 'hours'
+ type: str
+ choices: ['hours' , 'days']
+ auto_delete:
+ description:
+ - Indicates whether the system can automatically delete the snapshot.
+ type: bool
+ state:
+ description:
+ - Define whether the snapshot schedule should exist or not.
+ type: str
+ required: true
+ choices: [absent, present]
+notes:
+- Snapshot schedule created through Ansible will have only one rule.
+- Modification of rule type is not allowed. Within the same type, other
+ parameters can be modified.
+- If an existing snapshot schedule has more than 1 rule in it, only get and
+ delete operation is allowed.
+- The I(check_mode) is not supported.
+"""
+
+EXAMPLES = r"""
+- name: Create snapshot schedule (Rule Type - every_n_hours)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_N_Hours_Testing"
+ type: "every_n_hours"
+ interval: 6
+ desired_retention: 24
+ state: "{{state_present}}"
+
+- name: Create snapshot schedule (Rule Type - every_day)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Day_Testing"
+ type: "every_day"
+ hours_of_day:
+ - 8
+ - 14
+ auto_delete: True
+ state: "{{state_present}}"
+
+- name: Create snapshot schedule (Rule Type - every_n_days)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_N_Day_Testing"
+ type: "every_n_days"
+ day_interval: 2
+ desired_retention: 16
+ retention_unit: "days"
+ state: "{{state_present}}"
+
+- name: Create snapshot schedule (Rule Type - every_week)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Week_Testing"
+ type: "every_week"
+ days_of_week:
+ - MONDAY
+ - FRIDAY
+ hour: 12
+ minute: 30
+ desired_retention: 200
+ state: "{{state_present}}"
+
+- name: Create snapshot schedule (Rule Type - every_month)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Month_Testing"
+ type: "every_month"
+ day_of_month: 17
+ auto_delete: True
+ state: "{{state_present}}"
+
+- name: Get snapshot schedule details using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_N_Hours_Testing"
+ state: "{{state_present}}"
+
+- name: Get snapshot schedule details using id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ id: "{{id}}"
+ state: "{{state_present}}"
+
+- name: Modify snapshot schedule details id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ id: "{{id}}"
+ type: "every_n_hours"
+ interval: 8
+ state: "{{state_present}}"
+
+- name: Modify snapshot schedule using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Day_Testing"
+ type: "every_day"
+ desired_retention: 200
+ auto_delete: False
+ state: "{{state_present}}"
+
+- name: Delete snapshot schedule using id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ id: "{{id}}"
+ state: "{{state_absent}}"
+
+- name: Delete snapshot schedule using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Day_Testing"
+ state: "{{state_absent}}"
+"""
+
+RETURN = r"""
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+
+snapshot_schedule_details:
+ description: Details of the snapshot schedule.
+ returned: When snapshot schedule exists
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the snapshot schedule.
+ type: str
+ name:
+ description: The name of the snapshot schedule.
+ type: str
+ luns:
+ description: Details of volumes for which snapshot schedule
+ applied.
+ type: dict
+ contains:
+ UnityLunList:
+ description: List of volumes for which snapshot schedule
+ applied.
+ type: list
+ contains:
+ UnityLun:
+ description: Detail of volume.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to volume.
+ type: str
+ rules:
+ description: Details of rules that apply to snapshot schedule.
+ type: list
+ contains:
+ id:
+ description: The system ID of the rule.
+ type: str
+ interval:
+ description: Number of days or hours between snaps,
+ depending on the rule type.
+ type: int
+ hours:
+ description: Hourly frequency for the snapshot
+ schedule rule.
+ type: list
+ minute:
+ description: Minute frequency for the snapshot
+ schedule rule.
+ type: int
+ days_of_week:
+ description: Days of the week for which the snapshot
+ schedule rule applies.
+ type: dict
+ contains:
+ DayOfWeekEnumList:
+ description: Enumeration of days of the week.
+ type: list
+ days_of_month:
+ description: Days of the month for which the snapshot
+ schedule rule applies.
+ type: list
+ retention_time:
+ description: Period of time in seconds for which to keep
+ the snapshot.
+ type: int
+ retention_time_in_hours:
+ description: Period of time in hours for which to keep the
+ snapshot.
+ type: int
+ rule_type:
+ description: Type of the rule applied to snapshot schedule.
+ type: str
+ is_auto_delete:
+ description: Indicates whether the system can automatically
+ delete the snapshot based on pool automatic-deletion
+ thresholds.
+ type: bool
+ storage_resources:
+ description: Details of storage resources for which snapshot
+ schedule applied.
+ type: dict
+ contains:
+ UnityStorageResourceList:
+ description: List of storage resources for which snapshot
+ schedule applied.
+ type: list
+ contains:
+ UnityStorageResource:
+ description: Detail of storage resource.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to storage
+ resource.
+ type: str
+ sample: {
+ "existed": true,
+ "hash": 8742032390151,
+ "id": "snapSch_63",
+ "is_default": false,
+ "is_modified": null,
+ "is_sync_replicated": false,
+ "luns": null,
+ "modification_time": "2021-12-14 21:37:47.905000+00:00",
+ "name": "SS7_empty_hour_SS",
+ "rules": [
+ {
+ "access_type": "FilesystemSnapAccessTypeEnum.CHECKPOINT",
+ "days_of_month": null,
+ "days_of_week": {
+ "DayOfWeekEnumList": []
+ },
+ "existed": true,
+ "hash": 8742032280772,
+ "hours": [
+ 0
+ ],
+ "id": "SchedRule_109",
+ "interval": 2,
+ "is_auto_delete": false,
+ "minute": 0,
+ "retention_time": 86400,
+ "retention_time_in_hours": 24,
+ "rule_type": "every_n_days",
+ "type": "ScheduleTypeEnum.N_DAYS_AT_HHMM"
+ }
+ ],
+ "storage_resources": null,
+ "version": "ScheduleVersionEnum.LEGACY"
+ }
+"""
+
+import logging
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('snapshotschedule')
+
+application_type = "Ansible/1.6.0"
+
+
+class SnapshotSchedule(object):
+ """Class with snapshot schedule operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_snapshotschedule_parameters())
+
+ mutually_exclusive = [['name', 'id'], ['interval', 'hour'],
+ ['hours_of_day', 'hour'],
+ ['interval', 'hours_of_day', 'day_interval',
+ 'days_of_week', 'day_of_month']]
+ required_one_of = [['name', 'id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of
+ )
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+
    def schedule_modify_required(self, schedule_details):
        """Check if the desired snapshot schedule state is different from
        existing snapshot schedule state.

        Fails the module outright for invalid combinations (changing the
        rule type, or mixing desired_retention with auto_delete).

        :param schedule_details: The dict containing snapshot schedule
                                 details
        :return: Boolean value to indicate if modification is needed
        """

        # Check if existing snapshot schedule has auto_delete = True and
        # playbook sets desired retention without mentioning auto_delete
        if schedule_details['rules'][0]['is_auto_delete'] and\
                self.module.params['desired_retention']\
                and self.module.params['auto_delete'] is None:
            self.module.fail_json(msg="Desired retention cannot be "
                                      "specified when auto_delete is true"
                                  )
        # Conversely, enabling auto_delete conflicts with a retention
        # already configured on the existing schedule.
        if schedule_details['rules'][0]['retention_time'] and \
                self.module.params['auto_delete']:
            self.module.fail_json(msg="auto_delete cannot be specified when"
                                      " existing desired retention is set")

        # Numeric rule-type codes come from get_schedule_value (defined
        # elsewhere in this module); judging by the per-type checks below,
        # they map 0=every_n_hours, 1=every_day, 2=every_n_days,
        # 3=every_week, 4=every_month.
        desired_rule_type = get_schedule_value(self.module.params['type'])
        # Existing type arrives as e.g. "ScheduleTypeEnum.N_DAYS_AT_HHMM";
        # strip the enum class prefix before the enum lookup.
        existing_rule_string = schedule_details['rules'][0][
            'type'].split(".")[1]
        existing_rule_type = utils.ScheduleTypeEnum[
            existing_rule_string]._get_properties()['value']
        modified = False

        # Check if rule type is modified
        if desired_rule_type != existing_rule_type:
            self.module.fail_json(msg="Modification of rule type is not"
                                      " allowed.")

        # Convert desired retention to seconds
        duration_in_sec = convert_retention_to_seconds(
            self.module.params['desired_retention'],
            self.module.params['retention_unit'])

        # No retention requested: fall back to the existing value so the
        # comparison below never reports a spurious change.
        if not duration_in_sec:
            duration_in_sec = schedule_details['rules'][0]['retention_time']

        # Check if common parameters for the rules getting modified
        if (duration_in_sec and duration_in_sec != schedule_details[
                'rules'][0]['retention_time']):
            modified = True
        elif (self.module.params['auto_delete'] is not None and
              self.module.params['auto_delete'] != schedule_details['rules']
              [0]['is_auto_delete']):
            modified = True

        if (self.module.params['minute'] is not None and self.module.params[
                'minute'] != schedule_details['rules'][0]['minute']):
            modified = True

        # Per-type parameter checks; skipped once a change is detected.
        if not modified and desired_rule_type == 0:
            if (self.module.params['interval'] and self.module.params[
                    'interval'] != schedule_details['rules'][0]['interval']):
                modified = True
        elif not modified and desired_rule_type == 1:
            if (self.module.params['hours_of_day'] and
                    set(self.module.params['hours_of_day']) !=
                    set(schedule_details['rules'][0]['hours'])):
                modified = True
        elif not modified and desired_rule_type == 2:
            if (self.module.params['day_interval'] and self.module.params[
                'day_interval'] != schedule_details['rules'][0]['interval'])\
                    or (self.module.params['hour'] is not None and
                        self.module.params['hour'] != schedule_details[
                            'rules'][0]['hours'][0]):
                modified = True
        elif not modified and desired_rule_type == 3:
            # Existing day entries look like "DayOfWeekEnum.MONDAY"; strip
            # the enum prefix before comparing with playbook values.
            days = schedule_details['rules'][0]['days_of_week'][
                'DayOfWeekEnumList']
            existing_days = list()

            for day in days:
                temp = day.split(".")
                existing_days.append(temp[1])

            if (self.module.params['days_of_week'] and
                    set(self.module.params['days_of_week']) !=
                    set(existing_days)) or\
                    (self.module.params['hour'] is not None and
                     self.module.params['hour'] != schedule_details['rules'][
                         0]['hours'][0]):
                modified = True
        elif not modified and desired_rule_type == 4:
            if (self.module.params['day_of_month'] and self.module.params[
                'day_of_month'] != schedule_details['rules'][0][
                    'days_of_month'][0]) or\
                    (self.module.params['hour'] is not None and
                     self.module.params['hour'] != schedule_details['rules'][
                         0]['hours'][0]):
                modified = True
        LOG.info("Modify Flag: %s", modified)
        return modified
+
+ def get_days_of_week_enum(self, days_of_week):
+ """Get the enum for days of week.
+ :param days_of_week: The list of days of week
+ :return: The list of days_of_week enum
+ """
+
+ days_of_week_enum = []
+ for day in days_of_week:
+ if day in utils.DayOfWeekEnum.__members__:
+ days_of_week_enum.append(utils.DayOfWeekEnum[day])
+ else:
+ errormsg = "Invalid choice {0} for days of week".format(day)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ return days_of_week_enum
+
+ def create_rule(self, type, interval, hours_of_day, day_interval,
+ days_of_week, day_of_month, hour, minute,
+ desired_retention, retention_unit, auto_delete,
+ schedule_details=None):
+ """Create the rule."""
+
+ duration_in_sec = None
+ if desired_retention:
+ duration_in_sec = convert_retention_to_seconds(desired_retention,
+ retention_unit)
+
+ if not duration_in_sec and schedule_details:
+ duration_in_sec = schedule_details['rules'][0]['retention_time']
+
+ if hour is None and schedule_details is None:
+ hour = 0
+
+ if hour is None and schedule_details:
+ if schedule_details['rules'][0]['hours'] is not None:
+ hour = schedule_details['rules'][0]['hours'][0]
+
+ if minute is None and schedule_details is None:
+ minute = 0
+
+ if minute is None and schedule_details:
+ minute = schedule_details['rules'][0]['minute']
+
+ try:
+ if type == "every_n_hours":
+ if not interval:
+ interval = schedule_details['rules'][0]['interval']
+ rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\
+ every_n_hours(hour_interval=interval, minute=minute,
+ retention_time=duration_in_sec,
+ is_auto_delete=auto_delete)
+ elif type == "every_day":
+ if not hours_of_day:
+ hours_of_day = schedule_details['rules'][0]['hours']
+
+ rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\
+ every_day(hours=hours_of_day, minute=minute,
+ retention_time=duration_in_sec,
+ is_auto_delete=auto_delete)
+ elif type == "every_n_days":
+ if not day_interval:
+ day_interval = schedule_details['rules'][0]['interval']
+
+ rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\
+ every_n_days(day_interval=day_interval, hour=hour,
+ minute=minute,
+ retention_time=duration_in_sec,
+ is_auto_delete=auto_delete)
+ elif type == "every_week":
+ if days_of_week:
+ days_of_week_enum = self.get_days_of_week_enum(days_of_week)
+ else:
+ days = schedule_details['rules'][0]['days_of_week'][
+ 'DayOfWeekEnumList']
+ existing_days = list()
+
+ for day in days:
+ temp = day.split(".")
+ existing_days.append(temp[1])
+ days_of_week_enum = self.get_days_of_week_enum(days_of_week)
+
+ rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\
+ every_week(days_of_week=days_of_week_enum, hour=hour,
+ minute=minute, retention_time=duration_in_sec,
+ is_auto_delete=auto_delete)
+ else:
+ if day_of_month:
+ day_of_month_list = [day_of_month]
+ else:
+ day_of_month_list = schedule_details['rules'][0][
+ 'days_of_month']
+
+ rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\
+ every_month(days_of_month=day_of_month_list, hour=hour,
+ minute=minute, retention_time=duration_in_sec,
+ is_auto_delete=auto_delete)
+
+ return rule_dict
+
+ except Exception as e:
+ errormsg = "Create operation of snapshot schedule rule " \
+ " failed with error {0}".format(str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def create_snapshot_schedule(self, name, rule_dict):
+ """Create snapshot schedule.
+ :param name: The name of the snapshot schedule
+ :param rule_dict: The dict of the rule
+ :return: Boolean value to indicate if snapshot schedule created
+ """
+
+ try:
+ utils.snap_schedule.UnitySnapSchedule.create(
+ cli=self.unity_conn._cli, name=name, rules=[rule_dict])
+ return True
+
+ except Exception as e:
+ errormsg = "Create operation of snapshot schedule {0} failed" \
+ " with error {1}".format(name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_desired_retention(self, desired_retention, retention_unit):
+ """Validates the specified desired retention.
+ :param desired_retention: Desired retention of the snapshot
+ schedule
+ :param retention_unit: Retention unit for the snapshot schedule
+ """
+
+ if retention_unit == 'hours' and (desired_retention < 1 or
+ desired_retention > 744):
+ self.module.fail_json(msg="Please provide a valid integer as the"
+ " desired retention between 1 and 744.")
+ elif retention_unit == 'days' and (desired_retention < 1 or
+ desired_retention > 31):
+ self.module.fail_json(msg="Please provide a valid integer as the"
+ " desired retention between 1 and 31.")
+
+ def return_schedule_instance(self, id):
+ """Return the snapshot schedule instance
+ :param id: The id of the snapshot schedule
+ :return: Instance of the snapshot schedule
+ """
+
+ try:
+ obj_schedule = utils.snap_schedule.UnitySnapSchedule.get(
+ self.unity_conn._cli, id)
+ return obj_schedule
+
+ except Exception as e:
+ error_msg = "Failed to get the snapshot schedule {0} instance" \
+ " with error {1}".format(id, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def delete_snapshot_schedule(self, id):
+ """Delete snapshot schedule.
+ :param id: The ID of the snapshot schedule
+ :return: The boolean value to indicate if snapshot schedule
+ deleted
+ """
+
+ try:
+ obj_schedule = self.return_schedule_instance(id=id)
+ obj_schedule.delete()
+ return True
+
+ except Exception as e:
+ errormsg = "Delete operation of snapshot schedule id:{0} failed" \
+ " with error {1}".format(id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def modify_snapshot_schedule(self, id, schedule_details):
        """Modify snapshot schedule details.

        The schedule's single rule is replaced atomically: a new rule is
        built from the playbook parameters (falling back to existing
        values) and swapped in for the old rule id.

        :param id: The id of the snapshot schedule
        :param schedule_details: The dict containing schedule details
        :return: The boolean value to indicate if snapshot schedule
                 modified
        """

        try:
            obj_schedule = self.return_schedule_instance(id=id)
            rule_id = schedule_details['rules'][0]['id']

            # Default auto_delete to the existing rule's setting when the
            # playbook does not specify it.
            if self.module.params['auto_delete'] is None:
                auto_delete = schedule_details['rules'][0]['is_auto_delete']
            else:
                auto_delete = self.module.params['auto_delete']

            # Explicitly turning auto_delete off while setting a retention
            # is honoured; otherwise, when a retention already exists,
            # auto_delete is forced to None so retention and auto_delete
            # are never sent together.
            if schedule_details['rules'][0]['is_auto_delete'] and\
                    self.module.params['desired_retention'] and\
                    self.module.params['auto_delete'] is False:
                auto_delete = False
            elif schedule_details['rules'][0]['retention_time']:
                auto_delete = None

            rule_dict = self.create_rule(
                self.module.params['type'], self.module.params['interval'],
                self.module.params['hours_of_day'],
                self.module.params['day_interval'],
                self.module.params['days_of_week'],
                self.module.params['day_of_month'],
                self.module.params['hour'], self.module.params['minute'],
                self.module.params['desired_retention'],
                self.module.params['retention_unit'], auto_delete,
                schedule_details)

            # Add the new rule and remove the old one in one modify call.
            obj_schedule.modify(add_rules=[rule_dict],
                                remove_rule_ids=[rule_id])
            return True
        except Exception as e:
            errormsg = "Modify operation of snapshot schedule id:{0} failed" \
                       " with error {1}".format(id, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
    def get_details(self, id=None, name=None):
        """Get snapshot schedule details.

        Looks up by id when given, otherwise resolves the name to an id
        first. Each rule is augmented with ``retention_time_in_hours``
        and a human-readable ``rule_type``.

        :param id: The id of the snapshot schedule
        :param name: The name of the snapshot schedule
        :return: Dict containing snapshot schedule details if exists,
                 otherwise None
        """

        id_or_name = id if id else name
        errormsg = "Failed to get details of snapshot schedule {0} with" \
                   " error {1}"
        try:
            if not id:
                # Resolve name -> id via the list endpoint.
                details = utils.snap_schedule.UnitySnapScheduleList.get(
                    self.unity_conn._cli, name=name)

                if details:
                    id = details[0].id

            if id:
                details = self.unity_conn.get_snap_schedule(_id=id)

            if id and details.existed:
                rule_list = [rules._get_properties() for rules in
                             details.rules]
                for rule in rule_list:
                    # Derived fields for the module output.
                    # NOTE(review): assumes retention_time is numeric;
                    # verify for auto-delete rules.
                    rule['retention_time_in_hours'] = int(
                        rule['retention_time'] / 3600)
                    rule['rule_type'] = get_rule_type(rule['type'])
                schedule_details = details._get_properties()
                schedule_details['rules'] = rule_list
                return schedule_details
            else:
                LOG.info("Failed to get the snapshot schedule %s", id_or_name)
                return None

        except utils.HttpError as e:
            # Authentication failures get a clearer message.
            if e.http_status == 401:
                auth_err = "Incorrect username or password, {0}".format(
                    e.message)
                msg = errormsg.format(id_or_name, auth_err)
                LOG.error(msg)
                self.module.fail_json(msg=msg)
            else:
                msg = errormsg.format(id_or_name, str(e))
                LOG.error(msg)
                self.module.fail_json(msg=msg)

        except utils.UnityResourceNotFoundError as e:
            # A missing schedule is not fatal; the caller decides whether
            # to create one.
            msg = errormsg.format(id_or_name, str(e))
            LOG.error(msg)
            return None

        except Exception as e:
            msg = errormsg.format(id_or_name, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)
+
+ def validate_parameters(self):
+ """Validate the parameters."""
+
+ try:
+ if self.module.params['interval'] is not None and\
+ self.module.params['interval'] <= 0:
+ self.module.fail_json(msg="Interval can not be less than or"
+ " equal to 0.")
+
+ param_list = ['day_interval', 'day_of_month']
+ for param in param_list:
+ if self.module.params[param] is not None and\
+ self.module.params[param] == 0:
+ self.module.fail_json(msg="{0} can not be 0.".format(
+ param))
+
+ except Exception as e:
+ errormsg = "Failed to validate the module param with error" \
+ " {0}".format(str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+    def perform_module_operation(self):
+        """
+        Perform different actions on snapshot schedule module based on
+        parameters chosen in playbook
+        """
+        # NOTE(review): locals 'id' and 'type' shadow Python builtins;
+        # kept as-is to match the existing module style.
+        name = self.module.params['name']
+        id = self.module.params['id']
+        type = self.module.params['type']
+        interval = self.module.params['interval']
+        hours_of_day = self.module.params['hours_of_day']
+        day_interval = self.module.params['day_interval']
+        days_of_week = self.module.params['days_of_week']
+        day_of_month = self.module.params['day_of_month']
+        hour = self.module.params['hour']
+        minute = self.module.params['minute']
+        desired_retention = self.module.params['desired_retention']
+        retention_unit = self.module.params['retention_unit']
+        auto_delete = self.module.params['auto_delete']
+        state = self.module.params['state']
+
+        # result is a dictionary that contains changed status and snapshot
+        # schedule details
+        result = dict(
+            changed=False,
+            snapshot_schedule_details={}
+        )
+
+        self.validate_parameters()
+
+        if desired_retention is not None:
+            self.validate_desired_retention(desired_retention, retention_unit)
+
+        # A fixed retention and automatic deletion are mutually exclusive.
+        if auto_delete and desired_retention:
+            self.module.fail_json(msg="Desired retention cannot be "
+                                      "specified when auto_delete is true"
+                                  )
+
+        # Look up any existing schedule by name or id.
+        schedule_details = self.get_details(name=name, id=id)
+
+        # Prefer the array-assigned id for the modify/delete calls below.
+        if not id and schedule_details:
+            id = schedule_details['id']
+
+        # Create a new schedule when requested and none exists yet.
+        if state == 'present' and not schedule_details:
+            if not name:
+                msg = "The parameter name length is 0. It is too short." \
+                      " The min length is 1."
+                self.module.fail_json(msg=msg)
+
+            if not type:
+                self.module.fail_json(msg="Rule type is necessary to create"
+                                          " snapshot schedule")
+
+            # Each rule type has exactly one mandatory companion parameter.
+            if type == "every_n_hours" and interval is None:
+                self.module.fail_json(msg="To create snapshot schedule with"
+                                          " rule type every_n_hours, interval"
+                                          " is the mandatory parameter.")
+            elif type == "every_day" and hours_of_day is None:
+                self.module.fail_json(msg="To create snapshot schedule with"
+                                          " rule type every_day, hours_of_day"
+                                          " is the mandatory parameter.")
+            elif type == "every_n_days" and day_interval is None:
+                self.module.fail_json(msg="To create snapshot schedule with"
+                                          " rule type every_n_days,"
+                                          " day_interval is the mandatory"
+                                          " parameter.")
+            elif type == "every_week" and days_of_week is None:
+                self.module.fail_json(msg="To create snapshot schedule with"
+                                          " rule type every_week,"
+                                          " days_of_week is the mandatory"
+                                          " parameter.")
+            elif type == "every_month" and day_of_month is None:
+                self.module.fail_json(msg="To create snapshot schedule with"
+                                          " rule type every_month,"
+                                          " day_of_month is the mandatory"
+                                          " parameter.")
+
+            rule_dict = self.create_rule(type, interval, hours_of_day,
+                                         day_interval, days_of_week,
+                                         day_of_month, hour, minute,
+                                         desired_retention, retention_unit,
+                                         auto_delete)
+            result['changed'] = self.create_snapshot_schedule(name, rule_dict)
+
+        elif state == 'absent' and schedule_details:
+            result['changed'] = self.delete_snapshot_schedule(id)
+
+        # Modification is only attempted for single-rule schedules; a
+        # rule type must be supplied so there is something to compare.
+        if state == 'present' and type and schedule_details and\
+                len(schedule_details['rules']) == 1:
+            if (self.schedule_modify_required(schedule_details)):
+                result['changed'] = self.modify_snapshot_schedule(
+                    id, schedule_details)
+
+        # Always report the latest details back to the playbook.
+        result['snapshot_schedule_details'] = self.get_details(name=name,
+                                                               id=id)
+        self.module.exit_json(**result)
+
+
+def get_rule_type(type):
+    """Get the rule type of schedule.
+    :param type: The schedule type enum, as the string form of a
+                 storops ScheduleTypeEnum member
+    :return: The rule type of snapshot schedule (one of the module's
+             'type' choices), or None for an unrecognised value
+    """
+
+    # Maps str(ScheduleTypeEnum.<member>) to the module's 'type' choices.
+    schedule_type = {
+        "ScheduleTypeEnum.N_HOURS_AT_MM": "every_n_hours",
+        "ScheduleTypeEnum.DAY_AT_HHMM": "every_day",
+        "ScheduleTypeEnum.N_DAYS_AT_HHMM": "every_n_days",
+        "ScheduleTypeEnum.SELDAYS_AT_HHMM": "every_week",
+        "ScheduleTypeEnum.NTH_DAYOFMONTH_AT_HHMM": "every_month"
+    }
+
+    return schedule_type.get(type)
+
+
+def get_schedule_value(type):
+    """Get the enum for schedule.
+    :param type: The type of rule (one of the module's 'type' choices)
+    :return: The integer enum value for the rule, or None if the type
+             is not recognised
+    """
+
+    # NOTE(review): integer values are assumed to correspond to the
+    # storops ScheduleTypeEnum ordering — confirm against storops.
+    rule_type = {
+        "every_n_hours": 0,
+        "every_day": 1,
+        "every_n_days": 2,
+        "every_week": 3,
+        "every_month": 4
+    }
+
+    return rule_type.get(type)
+
+
+def convert_retention_to_seconds(desired_retention, retention_unit):
+    """Convert desired retention to seconds.
+    :param desired_retention: The desired retention for snapshot
+    schedule
+    :param retention_unit: The retention unit for snapshot schedule;
+    'hours' multiplies by 3600, any other value is treated as days
+    (the argument spec restricts it to 'hours'/'days')
+    :return: The integer value in seconds, or None when no retention
+    is given
+    """
+
+    duration_in_sec = None
+    if desired_retention:
+        if retention_unit == 'hours':
+            duration_in_sec = desired_retention * 60 * 60
+        else:
+            # 'days' case by elimination of the restricted choices.
+            duration_in_sec = desired_retention * 24 * 60 * 60
+    return duration_in_sec
+
+
+def get_snapshotschedule_parameters():
+    """This method provide parameters required for the ansible snapshot
+    schedule module on Unity.
+
+    :return: dict suitable for use as an AnsibleModule argument_spec
+    """
+
+    return dict(
+        name=dict(type='str'),
+        id=dict(type='str'),
+        # 'type' selects the scheduling rule; each choice has one
+        # mandatory companion parameter, enforced at runtime in
+        # perform_module_operation.
+        type=dict(type='str', choices=['every_n_hours', 'every_day',
+                                       'every_n_days', 'every_week',
+                                       'every_month']),
+        interval=dict(type='int'),
+        hours_of_day=dict(type='list', elements='int'),
+        day_interval=dict(type='int'),
+        days_of_week=dict(type='list', elements='str',
+                          choices=['SUNDAY', 'MONDAY', 'TUESDAY', 'WEDNESDAY',
+                                   'THURSDAY', 'FRIDAY', 'SATURDAY']),
+        day_of_month=dict(type='int'),
+        hour=dict(type='int'),
+        minute=dict(type='int'),
+        desired_retention=dict(type='int'),
+        retention_unit=dict(type='str', choices=['hours', 'days'],
+                            default='hours'),
+        auto_delete=dict(type='bool'),
+        state=dict(required=True, type='str', choices=['present', 'absent'])
+    )
+
+
+def main():
+    """ Create Unity snapshot schedule object and perform action on it
+    based on user input from playbook"""
+    obj = SnapshotSchedule()
+    obj.perform_module_operation()
+
+
+# Standard Ansible module entry point.
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/storagepool.py b/ansible_collections/dellemc/unity/plugins/modules/storagepool.py
new file mode 100644
index 000000000..ddb7eef65
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/storagepool.py
@@ -0,0 +1,879 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing storage pool on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: storagepool
+version_added: '1.1.0'
+short_description: Manage storage pool on Unity
+description:
+- Managing storage pool on Unity storage system contains the operations
+ Get details of storage pool,
+ Create a storage pool,
+ Modify storage pool.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Ambuj Dubey (@AmbujDube) <ansible.team@dell.com>
+
+options:
+ pool_name:
+ description:
+ - Name of the storage pool, unique in the storage system.
+ type: str
+
+ pool_id:
+ description:
+ - Unique identifier of the pool instance.
+ type: str
+
+ new_pool_name:
+ description:
+ - New name of the storage pool, unique in the storage system.
+ type: str
+
+ pool_description:
+ description:
+ - The description of the storage pool.
+ type: str
+
+ fast_cache:
+ description:
+ - Indicates whether the fast cache is enabled for the storage pool.
+    - C(enabled) - FAST Cache is enabled for the pool.
+    - C(disabled) - FAST Cache is disabled for the pool.
+ choices: [enabled, disabled]
+ type: str
+
+ fast_vp:
+ description:
+ - Indicates whether to enable scheduled data relocations for the pool.
+    - C(enabled) - Enabled scheduled data relocations for the pool.
+    - C(disabled) - Disabled scheduled data relocations for the pool.
+ choices: [enabled, disabled]
+ type: str
+
+ raid_groups:
+ description:
+ - Parameters to create RAID group from the disks and add it to the pool.
+ type: dict
+ suboptions:
+ disk_group_id:
+ description:
+ - Id of the disk group.
+ type: str
+
+ disk_num:
+ description:
+ - Number of disks.
+ type: int
+
+ raid_type:
+ description:
+ - RAID group types or RAID levels.
+ choices: [None, RAID5, RAID0, RAID1, RAID3, RAID10, RAID6, Mixed, Automatic]
+ type: str
+
+ stripe_width :
+ description:
+ - RAID group stripe widths, including parity or mirror disks.
+ choices: ['BEST_FIT', '2', '4', '5', '6', '8', '9', '10', '12', '13', '14', '16']
+ type: str
+
+ alert_threshold:
+ description:
+ - Threshold at which the system will generate alerts about the free space in the pool, specified as a percentage.
+ - Minimum threshold limit is 50.
+ - Maximum threshold limit is 84.
+ type: int
+
+ is_harvest_enabled:
+ description:
+ - Enable/Disable automatic deletion of snapshots based on pool space usage.
+ type: bool
+
+ pool_harvest_high_threshold:
+ description:
+ - Max threshold for space used in pool beyond which the system automatically starts deleting snapshots in the pool.
+ - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the system and pool.
+ - Minimum pool harvest high threshold value is 1.
+ - Maximum pool harvest high threshold value is 99.
+ type: float
+
+ pool_harvest_low_threshold:
+ description:
+ - Min threshold for space used in pool below which the system automatically stops deletion of snapshots in the pool.
+ - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the system and pool.
+ - Minimum pool harvest low threshold value is 0.
+ - Maximum pool harvest low threshold value is 98.
+ type: float
+
+ is_snap_harvest_enabled:
+ description:
+    - Enable/Disable automatic deletion of snapshots based on snapshot space usage.
+ type: bool
+
+ snap_harvest_high_threshold:
+ description:
+ - Max threshold for space used in snapshot beyond which the system automatically starts deleting snapshots in the pool.
+ - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the pool.
+ - Minimum snap harvest high threshold value is 1.
+ - Maximum snap harvest high threshold value is 99.
+ type: float
+
+ snap_harvest_low_threshold:
+ description:
+ - Min threshold for space used in snapshot below which the system will stop automatically deleting snapshots in the pool.
+ - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the pool.
+ - Minimum snap harvest low threshold value is 0.
+ - Maximum snap harvest low threshold value is 98.
+ type: float
+
+ pool_type:
+ description:
+ - Indicates storage pool type.
+ choices: [TRADITIONAL, DYNAMIC]
+ type: str
+
+ state:
+ description:
+ - Define whether the storage pool should exist or not.
+    - C(present) - indicates that the storage pool should exist on the system.
+    - C(absent) - indicates that the storage pool should not exist on the system.
+ choices: [absent, present]
+ type: str
+ required: true
+
+notes:
+- Deletion of storage pool is not allowed through Ansible module.
+- The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+- name: Get Storage pool details using pool_name
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_name: "{{pool_name}}"
+ state: "present"
+
+- name: Get Storage pool details using pool_id
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_id: "{{pool_id}}"
+ state: "present"
+
+- name: Modify Storage pool attributes using pool_name
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_name: "{{pool_name}}"
+ new_pool_name: "{{new_pool_name}}"
+ pool_description: "{{pool_description}}"
+ fast_cache: "{{fast_cache_enabled}}"
+ fast_vp: "{{fast_vp_enabled}}"
+ state: "present"
+
+- name: Modify Storage pool attributes using pool_id
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_id: "{{pool_id}}"
+ new_pool_name: "{{new_pool_name}}"
+ pool_description: "{{pool_description}}"
+ fast_cache: "{{fast_cache_enabled}}"
+ fast_vp: "{{fast_vp_enabled}}"
+ state: "present"
+
+- name: Create a StoragePool
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_name: "Test"
+ pool_description: "test pool"
+ raid_groups:
+ disk_group_id : "dg_16"
+ disk_num : 2
+ raid_type : "RAID10"
+ stripe_width : "BEST_FIT"
+ alert_threshold : 50
+ is_harvest_enabled : True
+ pool_harvest_high_threshold : 60
+ pool_harvest_low_threshold : 40
+ is_snap_harvest_enabled : True
+ snap_harvest_high_threshold : 70
+ snap_harvest_low_threshold : 50
+ fast_vp: "enabled"
+ fast_cache: "enabled"
+ pool_type : "DYNAMIC"
+ state: "present"
+
+'''
+
+RETURN = r'''
+ changed:
+ description: Whether or not the storage pool has changed.
+ returned: always
+ type: bool
+ sample: True
+
+ storage_pool_details:
+ description: The storage pool details.
+ returned: When storage pool exists.
+ type: dict
+ contains:
+ id:
+ description: Pool id, unique identifier of the pool.
+ type: str
+ name:
+ description: Pool name, unique in the storage system.
+ type: str
+ is_fast_cache_enabled:
+ description: Indicates whether the fast cache is enabled for the storage
+ pool.
+ true - FAST Cache is enabled for the pool.
+ false - FAST Cache is disabled for the pool.
+ type: bool
+ is_fast_vp_enabled:
+ description: Indicates whether to enable scheduled data relocations
+ for the storage pool.
+ true - Enabled scheduled data relocations for the pool.
+ false - Disabled scheduled data relocations for the pool.
+ type: bool
+ size_free_with_unit:
+ description: Indicates size_free with its appropriate unit
+ in human readable form.
+ type: str
+ size_subscribed_with_unit:
+ description: Indicates size_subscribed with its appropriate unit in
+ human readable form.
+ type: str
+ size_total_with_unit:
+ description: Indicates size_total with its appropriate unit in human
+ readable form.
+ type: str
+ size_used_with_unit:
+ description: Indicates size_used with its appropriate unit in human
+ readable form.
+ type: str
+ snap_size_subscribed_with_unit:
+ description: Indicates snap_size_subscribed with its
+ appropriate unit in human readable form.
+ type: str
+ snap_size_used_with_unit:
+ description: Indicates snap_size_used with its
+ appropriate unit in human readable form.
+ type: str
+ drives:
+ description: Indicates information about the drives
+ associated with the storage pool.
+ type: list
+ contains:
+ id:
+ description: Unique identifier of the drive.
+ type: str
+ name:
+ description: Indicates name of the drive.
+ type: str
+ size:
+ description: Indicates size of the drive.
+ type: str
+ disk_technology:
+ description: Indicates disk technology of the drive.
+ type: str
+ tier_type:
+ description: Indicates tier type of the drive.
+ type: str
+ sample: {
+ "alert_threshold": 50,
+ "creation_time": "2022-03-08 14:05:32+00:00",
+ "description": "",
+ "drives": [
+ {
+ "disk_technology": "SAS",
+ "id": "dpe_disk_22",
+ "name": "DPE Drive 22",
+ "size": 590860984320,
+ "tier_type": "PERFORMANCE"
+ },
+ {
+ "disk_technology": "SAS",
+ "id": "dpe_disk_23",
+ "name": "DPE Drive 23",
+ "size": 590860984320,
+ "tier_type": "PERFORMANCE"
+ },
+ {
+ "disk_technology": "SAS",
+ "id": "dpe_disk_24",
+ "name": "DPE Drive 24",
+ "size": 590860984320,
+ "tier_type": "PERFORMANCE"
+ }
+ ],
+ "existed": true,
+ "harvest_state": "UsageHarvestStateEnum.IDLE",
+ "hash": 8744642897210,
+ "health": {
+ "UnityHealth": {
+ "hash": 8744642799842
+ }
+ },
+ "id": "pool_280",
+ "is_all_flash": false,
+ "is_empty": false,
+ "is_fast_cache_enabled": false,
+ "is_fast_vp_enabled": false,
+ "is_harvest_enabled": true,
+ "is_snap_harvest_enabled": true,
+ "metadata_size_subscribed": 105763569664,
+ "metadata_size_used": 57176752128,
+ "name": "test_pool",
+ "object_id": 12884902146,
+ "pool_fast_vp": {
+ "UnityPoolFastVp": {
+ "hash": 8744647518980
+ }
+ },
+ "pool_space_harvest_high_threshold": 59.0,
+ "pool_space_harvest_low_threshold": 40.0,
+ "pool_type": "StoragePoolTypeEnum.DYNAMIC",
+ "raid_type": "RaidTypeEnum.RAID10",
+ "rebalance_progress": null,
+ "size_free": 470030483456,
+ "size_free_with_unit": "437.75 GB",
+ "size_subscribed": 447215820800,
+ "size_subscribed_with_unit": "416.5 GB",
+ "size_total": 574720311296,
+ "size_total_with_unit": "535.25 GB",
+ "size_used": 76838068224,
+ "size_used_with_unit": "71.56 GB",
+ "snap_size_subscribed": 128851369984,
+ "snap_size_subscribed_with_unit": "120.0 GB",
+ "snap_size_used": 2351104,
+ "snap_size_used_with_unit": "2.24 MB",
+ "snap_space_harvest_high_threshold": 80.0,
+ "snap_space_harvest_low_threshold": 60.0,
+ "tiers": {
+ "UnityPoolTierList": [
+ {
+ "disk_count": [
+ 0,
+ 3,
+ 0
+ ],
+ "existed": true,
+ "hash": 8744643017382,
+ "name": [
+ "Extreme Performance",
+ "Performance",
+ "Capacity"
+ ],
+ "pool_units": [
+ null,
+ {
+ "UnityPoolUnitList": [
+ {
+ "UnityPoolUnit": {
+ "hash": 8744642786759,
+ "id": "rg_4"
+ }
+ },
+ {
+ "UnityPoolUnit": {
+ "hash": 8744642786795,
+ "id": "rg_5"
+ }
+ }
+ ]
+ },
+ null
+ ],
+ "raid_type": [
+ "RaidTypeEnum.NONE",
+ "RaidTypeEnum.RAID10",
+ "RaidTypeEnum.NONE"
+ ],
+ "size_free": [
+ 0,
+ 470030483456,
+ 0
+ ],
+ "size_moving_down": [
+ 0,
+ 0,
+ 0
+ ],
+ "size_moving_up": [
+ 0,
+ 0,
+ 0
+ ],
+ "size_moving_within": [
+ 0,
+ 0,
+ 0
+ ],
+ "size_total": [
+ 0,
+ 574720311296,
+ 0
+ ],
+ "size_used": [
+ 0,
+ 104689827840,
+ 0
+ ],
+ "stripe_width": [
+ null,
+ "RaidStripeWidthEnum._2",
+ null
+ ],
+ "tier_type": [
+ "TierTypeEnum.EXTREME_PERFORMANCE",
+ "TierTypeEnum.PERFORMANCE",
+ "TierTypeEnum.CAPACITY"
+ ]
+ }
+ ]
+ }
+ }
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+import logging
+
+# Module-level logger for this Ansible module.
+LOG = utils.get_logger('storagepool')
+
+# Client identifier passed when opening the Unisphere connection.
+application_type = "Ansible/1.6.0"
+
+
+class StoragePool(object):
+    """Class with storage pool operations"""
+
+    def __init__(self):
+        """ Define all parameters required by this module"""
+        self.module_params = utils.get_unity_management_host_parameters()
+        self.module_params.update(get_storagepool_parameters())
+
+        # A pool is addressed by exactly one of pool_name / pool_id.
+        mutually_exclusive = [['pool_name', 'pool_id']]
+        required_one_of = [['pool_name', 'pool_id']]
+
+        # initialize the Ansible module
+        self.module = AnsibleModule(argument_spec=self.module_params,
+                                    supports_check_mode=False,
+                                    mutually_exclusive=mutually_exclusive,
+                                    required_one_of=required_one_of)
+        utils.ensure_required_libs(self.module)
+
+        # Connection used for all Unisphere calls in this module.
+        self.conn = utils.\
+            get_unity_unisphere_connection(self.module.params, application_type)
+
+    def get_details(self, pool_id=None, pool_name=None):
+        """ Get storage pool details.
+        :param pool_id: Unique identifier of the pool
+        :param pool_name: Name of the pool
+        :return: dict of pool properties (augmented with human-readable
+                 sizes and tier info), or None when the pool does not exist
+        """
+        try:
+            api_response = self.conn.get_pool(_id=pool_id, name=pool_name)
+            details = api_response._get_properties()
+
+            # FAST VP state lives on the nested pool_fast_vp object.
+            is_fast_vp_enabled = api_response._get_property_from_raw(
+                'pool_fast_vp').is_schedule_enabled
+            details['is_fast_vp_enabled'] = is_fast_vp_enabled
+
+            # Add human-readable forms of the raw byte counters.
+            details['size_free_with_unit'] = utils.\
+                convert_size_with_unit(int(details['size_free']))
+
+            details['size_subscribed_with_unit'] = utils.\
+                convert_size_with_unit(int(details['size_subscribed']))
+
+            details['size_total_with_unit'] = utils.\
+                convert_size_with_unit(int(details['size_total']))
+
+            details['size_used_with_unit'] = utils.\
+                convert_size_with_unit(int(details['size_used']))
+
+            details['snap_size_subscribed_with_unit'] = utils.\
+                convert_size_with_unit(int(details['snap_size_subscribed']))
+
+            details['snap_size_used_with_unit'] = utils.\
+                convert_size_with_unit(int(details['snap_size_used']))
+
+            # Flatten the pool tier information into the details dict.
+            pool_instance = utils.UnityPool.get(self.conn._cli, details['id'])
+            pool_tier_list = []
+            pool_tier_list.append((pool_instance.tiers)._get_properties())
+            pool_tier_dict = {}
+            pool_tier_dict['UnityPoolTierList'] = pool_tier_list
+            details['tiers'] = pool_tier_dict
+            return details
+        except Exception as e:
+            # An error message containing one of these fragments is
+            # treated as "pool does not exist" rather than a failure.
+            error = str(e)
+            check_list = ['not found', 'no attribute']
+            if any(ele in error for ele in check_list):
+                error_message = "pool details are not found"
+                LOG.info(error_message)
+                return None
+            error_message = 'Get details of storage pool failed with ' \
+                'error: {0}'.format(str(e))
+            LOG.error(error_message)
+            self.module.fail_json(msg=error_message)
+
+ def is_pool_modification_required(self, storage_pool_details):
+ """ Check if attributes of storage pool needs to be modified
+ """
+ try:
+ if self.module.params['new_pool_name'] and \
+ self.module.params['new_pool_name'] != \
+ storage_pool_details['name']:
+ return True
+
+ if self.module.params['pool_description'] is not None and \
+ self.module.params['pool_description'] != \
+ storage_pool_details['description']:
+ return True
+
+ if self.module.params['fast_cache']:
+ if (self.module.params['fast_cache'] == "enabled" and
+ not storage_pool_details['is_fast_cache_enabled']) or\
+ (self.module.params['fast_cache'] == "disabled" and storage_pool_details['is_fast_cache_enabled']):
+ return True
+
+ if self.module.params['fast_vp']:
+ if (self.module.params['fast_vp'] == "enabled" and
+ not storage_pool_details['is_fast_vp_enabled']) or \
+ (self.module.params['fast_vp'] == "disabled" and
+ storage_pool_details['is_fast_vp_enabled']):
+ return True
+
+ LOG.info("modify not required")
+ return False
+
+ except Exception as e:
+ error_message = 'Failed to determine if any modification'\
+ 'required for pool attributes with error: {0}'.format(str(e))
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+    def pool_modify(self, id, new_pool_name,
+                    pool_description, fast_cache, fast_vp):
+        """ Modify attributes of storage pool.
+        :param id: Unique identifier of the pool
+        :param new_pool_name: New name for the pool
+        :param pool_description: New description for the pool
+        :param fast_cache: Desired FAST Cache state (bool) or None
+        :param fast_vp: Desired FAST VP state (bool) or None
+        :return: refreshed pool details after the modification
+        """
+        # NOTE(review): this lookup happens outside the try block, so a
+        # failure here raises instead of producing fail_json — confirm
+        # whether that is intended.
+        pool_obj = utils.UnityPool.get(self.conn._cli, id)
+        try:
+            pool_obj.modify(name=new_pool_name, description=pool_description,
+                            is_fast_cache_enabled=fast_cache,
+                            is_fastvp_enabled=fast_vp)
+            new_storage_pool_details = self.get_details(pool_id=id,
+                                                        pool_name=None)
+            LOG.info("Modification Successful")
+            return new_storage_pool_details
+        except Exception as e:
+            # Report whichever identifier the playbook supplied.
+            if self.module.params['pool_id']:
+                pool_identifier = self.module.params['pool_id']
+            else:
+                pool_identifier = self.module.params['pool_name']
+            error_message = 'Modify attributes of storage pool {0} ' \
+                'failed with error: {1}'.format(pool_identifier, str(e))
+            LOG.error(error_message)
+            self.module.fail_json(msg=error_message)
+
+    def get_pool_drives(self, pool_id=None, pool_name=None):
+        """ Get pool drives attached to pool.
+        :param pool_id: Unique identifier of the pool
+        :param pool_name: Name of the pool
+        :return: list of dicts (id, name, size, disk_technology,
+                 tier_type) for each matching drive; empty list if none
+        """
+        pool_identifier = pool_id or pool_name
+        pool_drives_list = []
+        try:
+            # Scan all disks and keep those whose pool matches the given
+            # identifier by id or by name.
+            drive_instances = utils.UnityDiskList.get(self.conn._cli)
+            if drive_instances:
+                for drive in drive_instances:
+                    if drive.pool and (drive.pool.id == pool_identifier or drive.pool.name == pool_identifier):
+                        pool_drive = {"id": drive.id, "name": drive.name, "size": drive.size,
+                                      "disk_technology": drive.disk_technology.name,
+                                      "tier_type": drive.tier_type.name}
+                        pool_drives_list.append(pool_drive)
+            LOG.info("Successfully retrieved pool drive details")
+            return pool_drives_list
+        except Exception as e:
+            error_message = 'Get details of pool drives failed with ' \
+                'error: {0}'.format(str(e))
+            LOG.error(error_message)
+            self.module.fail_json(msg=error_message)
+
+    def get_raid_type_enum(self, raid_type):
+        """ Get raid_type_enum.
+        :param raid_type: The raid_type name, e.g. "RAID5"
+        :return: the matching utils.RaidTypeEnum member
+        """
+
+        # Fail fast on names that storops' RaidTypeEnum does not define.
+        if raid_type in utils.RaidTypeEnum.__members__:
+            return utils.RaidTypeEnum[raid_type]
+        else:
+            errormsg = "Invalid choice %s for Raid Type" % raid_type
+            LOG.error(errormsg)
+            self.module.fail_json(msg=errormsg)
+
+    def get_raid_stripe_width_enum(self, stripe_width):
+        """ Get raid_stripe_width enum.
+        :param stripe_width: The raid_stripe_width as a string, either
+                             "BEST_FIT" or a number such as "2"
+        :return: the matching utils.RaidStripeWidthEnum member
+        """
+        # Numeric widths are prefixed with '_' to match the enum member
+        # names (e.g. "2" -> RaidStripeWidthEnum._2).
+        if stripe_width != "BEST_FIT":
+            stripe_width = "_" + stripe_width
+        if stripe_width in utils.RaidStripeWidthEnum.__members__:
+            return utils.RaidStripeWidthEnum[stripe_width]
+        else:
+            errormsg = "Invalid choice %s for stripe width" % stripe_width
+            LOG.error(errormsg)
+            self.module.fail_json(msg=errormsg)
+
+    def get_pool_type_enum(self, pool_type):
+        """ Get the storage pool_type enum.
+        :param pool_type: The pool_type, "TRADITIONAL" or "DYNAMIC"
+        :return: the integer value of the pool type
+        """
+
+        # NOTE(review): raw integers are returned here instead of
+        # StoragePoolTypeEnum members — presumably they match the enum's
+        # numeric values; confirm against storops.
+        if pool_type == "TRADITIONAL":
+            return 1
+        elif pool_type == "DYNAMIC":
+            return 2
+        else:
+            errormsg = "Invalid choice %s for Storage Pool Type" % pool_type
+            LOG.error(errormsg)
+            self.module.fail_json(msg=errormsg)
+
+ def get_raid_groups(self, raid_groups):
+ """ Get the raid groups for creating pool"""
+ try:
+ disk_obj = utils.UnityDiskGroup.get(self.conn._cli, _id=raid_groups['disk_group_id'])
+ disk_num = raid_groups['disk_num']
+ raid_type = raid_groups['raid_type']
+ raid_type = self.get_raid_type_enum(raid_type) \
+ if raid_type else None
+ stripe_width = raid_groups['stripe_width']
+ stripe_width = self.get_raid_stripe_width_enum(stripe_width) \
+ if stripe_width else None
+ raid_group = utils.RaidGroupParameter(disk_group=disk_obj,
+ disk_num=disk_num, raid_type=raid_type,
+ stripe_width=stripe_width)
+ raid_groups = [raid_group]
+ return raid_groups
+ except Exception as e:
+ error_message = 'Failed to create storage pool with error: %s' % str(e)
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def validate_create_pool_params(self, alert_threshold=None,
+ pool_harvest_high_threshold=None,
+ pool_harvest_low_threshold=None,
+ snap_harvest_high_threshold=None,
+ snap_harvest_low_threshold=None):
+ """ Validates params for creating pool"""
+ if alert_threshold and (alert_threshold < 50 or alert_threshold > 84):
+ errormsg = "Alert threshold is not in the allowed value range of 50 - 84"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ if pool_harvest_high_threshold and (pool_harvest_high_threshold < 1 or pool_harvest_high_threshold > 99):
+ errormsg = "Pool harvest high threshold is not in the allowed value range of 1 - 99"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ if pool_harvest_low_threshold and (pool_harvest_low_threshold < 0 or pool_harvest_low_threshold > 98):
+ errormsg = "Pool harvest low threshold is not in the allowed value range of 0 - 98"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ if snap_harvest_high_threshold and (snap_harvest_high_threshold < 1 or snap_harvest_high_threshold > 99):
+ errormsg = "Snap harvest high threshold is not in the allowed value range of 1 - 99"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ if snap_harvest_low_threshold and (snap_harvest_low_threshold < 0 or snap_harvest_low_threshold > 98):
+ errormsg = "Snap harvest low threshold is not in the allowed value range of 0 - 98"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+    def create_pool(self, name, raid_groups):
+        """ Creates a StoragePool.
+        :param name: Name of the pool to create
+        :param raid_groups: raid_groups suboption dict from the playbook,
+                            or None
+        :return: tuple (changed, storage_pool_details)
+        """
+        try:
+            pool_obj = utils.UnityPool.get(self.conn._cli)
+            pool_description = self.module.params['pool_description']
+            raid_groups = self.get_raid_groups(raid_groups) \
+                if raid_groups else None
+            alert_threshold = self.module.params['alert_threshold']
+            # Harvest thresholds are only forwarded when the matching
+            # harvest flag is enabled; otherwise they stay None.
+            pool_harvest_high_threshold = None
+            pool_harvest_low_threshold = None
+            snap_harvest_high_threshold = None
+            snap_harvest_low_threshold = None
+            is_harvest_enabled = self.module.params['is_harvest_enabled']
+            if is_harvest_enabled:
+                pool_harvest_high_threshold = self.module.params['pool_harvest_high_threshold']
+                pool_harvest_low_threshold = self.module.params['pool_harvest_low_threshold']
+            is_snap_harvest_enabled = self.module.params['is_snap_harvest_enabled']
+            if is_snap_harvest_enabled:
+                snap_harvest_high_threshold = self.module.params['snap_harvest_high_threshold']
+                snap_harvest_low_threshold = self.module.params['snap_harvest_low_threshold']
+            self.validate_create_pool_params(alert_threshold=alert_threshold,
+                                             pool_harvest_high_threshold=pool_harvest_high_threshold,
+                                             pool_harvest_low_threshold=pool_harvest_low_threshold,
+                                             snap_harvest_high_threshold=snap_harvest_high_threshold,
+                                             snap_harvest_low_threshold=snap_harvest_low_threshold)
+            pool_type = self.module.params['pool_type']
+            pool_type = self.get_pool_type_enum(pool_type) \
+                if pool_type else None
+            # Convert the 'enabled'/'disabled' string into the boolean
+            # form the create call expects.
+            fast_vp = self.module.params['fast_vp']
+            if fast_vp:
+                if fast_vp == "enabled":
+                    fast_vp = True
+                else:
+                    fast_vp = False
+
+            pool_obj.create(self.conn._cli, name=name, description=pool_description, raid_groups=raid_groups,
+                            alert_threshold=alert_threshold,
+                            is_harvest_enabled=is_harvest_enabled,
+                            is_snap_harvest_enabled=is_snap_harvest_enabled,
+                            pool_harvest_high_threshold=pool_harvest_high_threshold,
+                            pool_harvest_low_threshold=pool_harvest_low_threshold,
+                            snap_harvest_high_threshold=snap_harvest_high_threshold,
+                            snap_harvest_low_threshold=snap_harvest_low_threshold,
+                            is_fastvp_enabled=fast_vp,
+                            pool_type=pool_type)
+            LOG.info("Creation of storage pool successful")
+            # Re-read the pool so the caller gets the full details dict.
+            storage_pool_details = self.get_details(pool_name=name)
+            changed = True
+            return changed, storage_pool_details
+        except Exception as e:
+            error_message = 'Failed to create storage pool with error: %s' % str(e)
+            LOG.error(error_message)
+            self.module.fail_json(msg=error_message)
+
+    def perform_module_operation(self):
+        """
+        Perform different actions on storage pool module based on parameters
+        chosen in playbook
+        """
+        pool_name = self.module.params['pool_name']
+        pool_id = self.module.params['pool_id']
+        new_pool_name = self.module.params['new_pool_name']
+        pool_description = self.module.params['pool_description']
+        fast_cache = self.module.params['fast_cache']
+        fast_vp = self.module.params['fast_vp']
+        state = self.module.params['state']
+        raid_groups = self.module.params['raid_groups']
+        # Convert 'enabled'/'disabled' strings to the booleans the
+        # modify call expects; None means "leave unchanged".
+        if fast_cache:
+            if fast_cache == "enabled":
+                fast_cache = True
+            else:
+                fast_cache = False
+
+        if fast_vp:
+            if fast_vp == "enabled":
+                fast_vp = True
+            else:
+                fast_vp = False
+
+        # result is a dictionary that contains changed status and storage pool details
+        result = dict(
+            changed=False,
+            storage_pool_details={}
+        )
+
+        storage_pool_details = self.get_details(pool_id, pool_name)
+        result['storage_pool_details'] = storage_pool_details
+
+        # Deletion is deliberately unsupported (see module notes).
+        if state == 'absent' and storage_pool_details:
+            error_message = 'Deletion of storage pool is not allowed through'\
+                            ' Ansible module'
+            LOG.error(error_message)
+            self.module.fail_json(msg=error_message)
+
+        # Create storage pool
+        if state == 'present' and not storage_pool_details:
+            if pool_name is not None and len(pool_name) != 0:
+                result['changed'], storage_pool_details \
+                    = self.create_pool(name=pool_name, raid_groups=raid_groups)
+                result['storage_pool_details'] = storage_pool_details
+            else:
+                error_message = 'The parameter pool_name length is 0. It'\
+                                ' is too short. The min length is 1'
+                LOG.error(error_message)
+                self.module.fail_json(msg=error_message)
+
+        # Get pool drive details
+        if result['storage_pool_details']:
+            result['storage_pool_details']['drives'] = self.get_pool_drives(pool_id=pool_id, pool_name=pool_name)
+
+        # Modify an existing pool when any attribute differs.
+        if state == 'present' and storage_pool_details:
+            if new_pool_name is not None and len(new_pool_name) == 0:
+                error_message = 'The parameter new_pool_name length is 0. It'\
+                                ' is too short. The min length is 1'
+                LOG.error(error_message)
+                self.module.fail_json(msg=error_message)
+            pool_modify_flag = self.\
+                is_pool_modification_required(storage_pool_details)
+            LOG.info("Storage pool modification flag %s",
+                     str(pool_modify_flag))
+
+            if pool_modify_flag:
+                result['storage_pool_details'] = \
+                    self.pool_modify(storage_pool_details['id'], new_pool_name,
+                                     pool_description, fast_cache, fast_vp)
+                result['changed'] = True
+        self.module.exit_json(**result)
+
+
+def get_storagepool_parameters():
+    """This method provides parameters required for the ansible storage pool
+    module on Unity.
+
+    :return: dict suitable for use as an AnsibleModule argument_spec
+    """
+    return dict(
+        pool_name=dict(required=False, type='str'),
+        pool_id=dict(required=False, type='str'),
+        new_pool_name=dict(required=False, type='str'),
+        pool_description=dict(required=False, type='str'),
+        fast_cache=dict(required=False, type='str', choices=['enabled',
+                                                             'disabled']),
+        fast_vp=dict(required=False, type='str', choices=['enabled',
+                                                          'disabled']),
+        state=dict(required=True, type='str', choices=['present', 'absent']),
+        # raid_groups is only consumed at creation time.
+        raid_groups=dict(required=False, type='dict', options=dict(
+            disk_group_id=dict(required=False, type='str'),
+            disk_num=dict(required=False, type='int'),
+            raid_type=dict(required=False, type='str', choices=['None', 'RAID5', 'RAID0', 'RAID1', 'RAID3', 'RAID10',
+                                                                'RAID6', 'Mixed', 'Automatic']),
+            stripe_width=dict(required=False, type='str', choices=['BEST_FIT', '2', '4', '5',
+                                                                   '6', '8', '9', '10', '12', '13', '14', '16']))),
+        alert_threshold=dict(required=False, type='int'),
+        is_harvest_enabled=dict(required=False, type='bool'),
+        pool_harvest_high_threshold=dict(required=False, type='float'),
+        pool_harvest_low_threshold=dict(required=False, type='float'),
+        is_snap_harvest_enabled=dict(required=False, type='bool'),
+        snap_harvest_high_threshold=dict(required=False, type='float'),
+        snap_harvest_low_threshold=dict(required=False, type='float'),
+        pool_type=dict(required=False, type='str', choices=['TRADITIONAL', 'DYNAMIC'])
+    )
+
+
+def main():
+    """ Create Unity storage pool object and perform action on it
+    based on user input from playbook"""
+    obj = StoragePool()
+    obj.perform_module_operation()
+
+
+# Standard Ansible module entry point.
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py b/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py
new file mode 100644
index 000000000..063834b45
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py
@@ -0,0 +1,706 @@
+#!/usr/bin/python
+# Copyright: (c) 2021, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing quota tree on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: tree_quota
+short_description: Manage quota tree on the Unity storage system
+description:
+- Managing Quota tree on the Unity storage system includes
+ Create quota tree,
+ Get quota tree,
+ Modify quota tree and
+ Delete quota tree.
+version_added: '1.2.0'
+extends_documentation_fragment:
+ - dellemc.unity.unity
+author:
+- Spandita Panigrahi (@panigs7) <ansible.team@dell.com>
+options:
+ filesystem_name:
+ description:
+ - The name of the filesystem for which quota tree is created.
+ - For creation or modification of a quota tree either I(filesystem_name) or
+ I(filesystem_id) is required.
+ type: str
+ filesystem_id:
+ description:
+ - The ID of the filesystem for which the quota tree is created.
+ - For creation of a quota tree either I(filesystem_id) or
+ I(filesystem_name) is required.
+ type: str
+ nas_server_name:
+ description:
+ - The name of the NAS server in which the filesystem is created.
+ - For creation of a quota tree either I(nas_server_name) or
+ I(nas_server_id) is required.
+ type: str
+ nas_server_id:
+ description:
+ - The ID of the NAS server in which the filesystem is created.
    - For creation of a quota tree either I(nas_server_id) or
      I(nas_server_name) is required.
+ type: str
+ tree_quota_id:
+ description:
+ - The ID of the quota tree.
+ - Either I(tree_quota_id) or I(path) to quota tree is required to
+ view/modify/delete quota tree.
+ type: str
+ path:
+ description:
+ - The path to the quota tree.
+ - Either I(tree_quota_id) or I(path) to quota tree is required to
+ create/view/modify/delete a quota tree.
+ - Path must start with a forward slash '/'.
+ type: str
+ hard_limit:
+ description:
+ - Hard limitation for a quota tree on the total space available. If exceeded,
+ users in quota tree cannot write data.
+ - Value C(0) implies no limit.
    - One of the values of I(soft_limit) and I(hard_limit) can be C(0), however, both cannot be C(0)
+ during creation of a quota tree.
+ type: int
+ soft_limit:
+ description:
+ - Soft limitation for a quota tree on the total space available. If exceeded,
+ notification will be sent to users in the quota tree for the grace period mentioned, beyond
+ which users cannot use space.
+ - Value C(0) implies no limit.
+ - Both I(soft_limit) and I(hard_limit) cannot be C(0) during creation of quota tree.
+ type: int
+ cap_unit:
+ description:
+ - Unit of I(soft_limit) and I(hard_limit) size.
+ - It defaults to C(GB) if not specified.
+ choices: ['MB', 'GB', 'TB']
+ type: str
+ description:
+ description:
+ - Description of a quota tree.
+ type: str
+ state:
+ description:
+ - The state option is used to mention the existence of the filesystem
+ quota tree.
+ type: str
+ required: true
+ choices: ['absent', 'present']
+
+notes:
+ - The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+ - name: Get quota tree details by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_10"
+ state: "present"
+
+ - name: Get quota tree details by quota tree path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "fs_2171"
+ nas_server_id: "nas_21"
+ path: "/test"
+ state: "present"
+
+ - name: Create quota tree for a filesystem with filesystem id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ path: "/test_new"
+ state: "present"
+
+ - name: Create quota tree for a filesystem with filesystem name
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "Test_filesystem"
+ nas_server_name: "lglad068"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ path: "/test_new"
+ state: "present"
+
+ - name: Modify quota tree limit usage by quota tree path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ path: "/test_new"
+ hard_limit: 10
+ cap_unit: "TB"
+ soft_limit: 8
+ state: "present"
+
+ - name: Modify quota tree by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ tree_quota_id: "treequota_171798700679_10"
+ hard_limit: 12
+ cap_unit: "TB"
+ soft_limit: 10
+ state: "present"
+
+ - name: Delete quota tree by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ tree_quota_id: "treequota_171798700679_10"
+ state: "absent"
+
+ - name: Delete quota tree by path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/test_new"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+
+get_tree_quota_details:
+ description: Details of the quota tree.
+ returned: When quota tree exists
+ type: dict
+ contains:
+ filesystem:
+ description: Filesystem details for which the quota
+ tree is created.
+ type: dict
+ contains:
+ UnityFileSystem:
+ description: Filesystem details for which the
+ quota tree is created.
+ type: dict
+ contains:
+ id:
                        description: ID of the filesystem for
                            which the quota tree is created.
+ type: str
+ description:
+ description: Description of the quota tree.
+ type: str
+ path:
+ description: Path to quota tree.
+ A valid path must start with a forward slash '/'.
+ It is mandatory while creating a quota tree.
+ type: str
+ hard_limit:
+ description: Hard limit of quota tree.
+ If the quota tree's space usage exceeds
+ the hard limit, users in quota tree cannot write data.
+ type: int
+ soft_limit:
+ description: Soft limit of the quota tree.
+ If the quota tree's space usage exceeds the soft limit,
+ the storage system starts to count down based
+ on the specified grace period.
+ type: int
+ id:
+ description: Quota tree ID.
+ type: str
+ size_used:
+ description: Size of used space in the filesystem by the user files.
+ type: int
+ gp_left:
+ description: The grace period left after the
+ soft limit for the user quota is exceeded.
+ type: int
+ state:
+ description: State of the quota tree.
+ type: int
+ sample: {
+ "description": "",
+ "existed": true,
+ "filesystem": {
+ "UnityFileSystem": {
+ "hash": 8788549469862,
+ "id": "fs_137",
+ "name": "test",
+ "nas_server": {
+ "id": "nas_1",
+ "name": "lglad072"
+ }
+ }
+ },
+ "gp_left": null,
+ "hard_limit": "6.0 TB",
+ "hash": 8788549497558,
+ "id": "treequota_171798694897_1",
+ "path": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
+ "size_used": 0,
+ "soft_limit": "5.0 TB",
+ "state": 0
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
# Module-level logger for this Ansible module.
LOG = utils.get_logger('tree_quota')

# Client identifier passed to the Unisphere connection helper below.
application_type = "Ansible/1.6.0"
+
+
class QuotaTree(object):
    """Class with Quota Tree operations"""

    def __init__(self):
        """Define all parameters required by this module"""
        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_quota_tree_parameters())

        mutually_exclusive = [['filesystem_name', 'filesystem_id'],
                              ['nas_server_name', 'nas_server_id']]

        # initialize the Ansible module
        self.module = AnsibleModule(
            argument_spec=self.module_params,
            supports_check_mode=False,
            mutually_exclusive=mutually_exclusive)
        utils.ensure_required_libs(self.module)

        self.unity_conn = utils.get_unity_unisphere_connection(
            self.module.params, application_type)

    def check_quota_tree_is_present(self, fs_id, path, tree_quota_id):
        """
        Check if quota tree is present in filesystem.
        :param fs_id: ID of filesystem where quota tree is searched.
        :param path: Path to the quota tree
        :param tree_quota_id: ID of the quota tree
        :return: ID of quota tree if it exists else None.
        """
        if tree_quota_id is None and path is None:
            return None

        all_tree_quota = self.unity_conn.get_tree_quota(filesystem=fs_id,
                                                        id=tree_quota_id,
                                                        path=path)

        # Looking up an explicit ID that finds nothing is an error for
        # state=present; a path lookup may legitimately find nothing
        # (the quota tree is about to be created).
        if tree_quota_id and len(all_tree_quota) == 0 \
                and self.module.params['state'] == "present":
            errormsg = "Tree quota %s does not exist." % tree_quota_id
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

        if len(all_tree_quota) > 0:
            msg = "Quota tree with id %s is present in filesystem %s" % (all_tree_quota[0].id,
                                                                         fs_id)
            LOG.info(msg)
            return all_tree_quota[0].id
        else:
            return None

    def create_quota_tree(self, fs_id, soft_limit, hard_limit, unit, path, description):
        """
        Create quota tree of a filesystem.
        :param fs_id: ID of filesystem where quota tree is to be created.
        :param soft_limit: Soft limit
        :param hard_limit: Hard limit
        :param unit: Unit of soft limit and hard limit
        :param path: Path to quota tree
        :param description: Description for quota tree
        :return: The newly created quota tree object, or None.
        """

        if soft_limit is None and hard_limit is None:
            errormsg = "Both soft limit and hard limit cannot be empty. " \
                       "Please provide at least one to create quota tree."
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

        # get_size_bytes converts the numeric limit plus cap_unit into bytes.
        soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit)
        hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit)
        try:
            obj_tree_quota = self.unity_conn.create_tree_quota(filesystem_id=fs_id, hard_limit=hard_limit_in_bytes,
                                                               soft_limit=soft_limit_in_bytes, path=path,
                                                               description=description)
            LOG.info("Successfully created quota tree")

            if obj_tree_quota:
                return obj_tree_quota
            else:
                return None

        except Exception as e:
            errormsg = "Create quota tree operation at path {0} failed in filesystem {1}" \
                       " with error {2}".format(path, fs_id, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

    def get_filesystem_tree_quota_display_attributes(self, tree_quota_id):
        """Display quota tree attributes
        :param tree_quota_id: Quota tree ID
        :return: Quota tree dict to display
        """
        try:
            tree_quota_obj = self.unity_conn.get_tree_quota(_id=tree_quota_id)
            tree_quota_details = tree_quota_obj._get_properties()
            if tree_quota_obj and tree_quota_obj.existed:
                tree_quota_details['soft_limit'] = utils. \
                    convert_size_with_unit(int(tree_quota_details['soft_limit']))
                tree_quota_details['hard_limit'] = utils. \
                    convert_size_with_unit(int(tree_quota_details['hard_limit']))

                tree_quota_details['filesystem']['UnityFileSystem']['name'] = \
                    tree_quota_obj.filesystem.name
                tree_quota_details['filesystem']['UnityFileSystem'].update(
                    {'nas_server': {'name': tree_quota_obj.filesystem.nas_server.name,
                                    'id': tree_quota_obj.filesystem.nas_server.id}})
                return tree_quota_details

        except Exception as e:
            # Report the ID that was passed in: tree_quota_obj is unbound here
            # when get_tree_quota() itself raised, so referencing
            # tree_quota_obj.id would mask the real error with a NameError.
            errormsg = "Failed to display quota tree details {0} with " \
                       "error {1}".format(tree_quota_id, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

    def get_filesystem(self, nas_server=None, name=None, id=None):
        """
        Get filesystem details.
        :param nas_server: Nas server object.
        :param name: Name of filesystem.
        :param id: ID of filesystem.
        :return: The filesystem object if it exists.
        """
        id_or_name = id if id else name
        try:
            obj_fs = None
            if name:
                # A filesystem name is only unique within a NAS server.
                if not nas_server:
                    err_msg = "NAS Server is required to get the FileSystem."
                    LOG.error(err_msg)
                    self.module.fail_json(msg=err_msg)
                obj_fs = self.unity_conn.get_filesystem(name=name,
                                                        nas_server=nas_server)
                if obj_fs and obj_fs.existed:
                    LOG.info("Successfully got the filesystem object %s.",
                             obj_fs)
                    return obj_fs
            if id:
                if nas_server:
                    obj_fs = self.unity_conn \
                        .get_filesystem(id=id, nas_server=nas_server)
                else:
                    obj_fs = self.unity_conn.get_filesystem(id=id)
                if obj_fs and obj_fs.existed:
                    LOG.info("Successfully got the filesystem object %s.",
                             obj_fs)
                    return obj_fs
        except Exception as e:
            error_msg = "Failed to get filesystem %s with error %s." \
                        % (id_or_name, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def get_nas_server_obj(self, name=None, id=None):
        """
        Get nas server details.
        :param name: Nas server name.
        :param id: Nas server ID.
        :return: The NAS server object if it exists.
        """
        nas_server = id if id else name
        error_msg = ("Failed to get NAS server %s." % nas_server)
        try:
            obj_nas = self.unity_conn.get_nas_server(_id=id, name=name)
            if name and obj_nas.existed:
                LOG.info("Successfully got the NAS server object %s.",
                         obj_nas)
                return obj_nas
            elif id and obj_nas.existed:
                LOG.info("Successfully got the NAS server object %s.",
                         obj_nas)
                return obj_nas
            else:
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)
        except Exception as e:
            error_msg = "Failed to get NAS server %s with error %s." \
                        % (nas_server, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def modify_tree_quota(self, tree_quota_id, soft_limit, hard_limit, unit, description):
        """
        Modify quota tree of filesystem.
        :param tree_quota_id: ID of the quota tree
        :param soft_limit: Soft limit
        :param hard_limit: Hard limit
        :param unit: Unit of soft limit and hard limit
        :param description: Description of quota tree
        :return: Boolean value whether modify quota tree operation is successful.
        """
        try:
            if soft_limit is None and hard_limit is None:
                return False
            tree_quota_obj = self.unity_conn.get_tree_quota(tree_quota_id)._get_properties()
            # Parameters left unset in the playbook keep their current value,
            # so the idempotency comparison below compares like with like.
            if soft_limit is None:
                soft_limit_in_bytes = tree_quota_obj['soft_limit']
            else:
                soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit)
            if hard_limit is None:
                hard_limit_in_bytes = tree_quota_obj['hard_limit']
            else:
                hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit)

            if description is None:
                description = tree_quota_obj['description']

            if tree_quota_obj:
                if tree_quota_obj['soft_limit'] == soft_limit_in_bytes and \
                        tree_quota_obj['hard_limit'] == hard_limit_in_bytes and \
                        tree_quota_obj['description'] == description:
                    # Nothing would change -- report "not modified".
                    return False
                else:
                    modify_tree_quota = self.unity_conn.modify_tree_quota(tree_quota_id=tree_quota_id,
                                                                          hard_limit=hard_limit_in_bytes,
                                                                          soft_limit=soft_limit_in_bytes,
                                                                          description=description)
                    LOG.info("Successfully modified quota tree")
                    if modify_tree_quota:
                        return True
        except Exception as e:
            errormsg = "Modify quota tree operation {0} failed" \
                       " with error {1}".format(tree_quota_id, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

    def delete_tree_quota(self, tree_quota_id):
        """
        Delete quota tree of a filesystem.
        :param tree_quota_id: ID of quota tree
        :return: Boolean whether quota tree is deleted
        """

        try:
            delete_tree_quota_obj = self.unity_conn.delete_tree_quota(tree_quota_id=tree_quota_id)

            if delete_tree_quota_obj:
                return True

        except Exception as e:
            errormsg = "Delete operation of quota tree id:{0} " \
                       "failed with error {1}".format(tree_quota_id,
                                                      str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

    def perform_module_operation(self):
        """
        Perform different actions on quota tree module based on parameters
        passed in the playbook
        """
        filesystem_id = self.module.params['filesystem_id']
        filesystem_name = self.module.params['filesystem_name']
        nas_server_name = self.module.params['nas_server_name']
        nas_server_id = self.module.params['nas_server_id']
        cap_unit = self.module.params['cap_unit']
        state = self.module.params['state']
        hard_limit = self.module.params['hard_limit']
        soft_limit = self.module.params['soft_limit']
        path = self.module.params['path']
        description = self.module.params['description']
        tree_quota_id = self.module.params['tree_quota_id']
        create_tree_quota_obj = None
        nas_server_resource = None
        fs_id = None

        '''
        result is a dictionary to contain end state and quota tree details
        '''
        result = dict(
            changed=False,
            create_tree_quota=False,
            modify_tree_quota=False,
            get_tree_quota_details={},
            delete_tree_quota=False

        )

        # cap_unit defaults to GB when a limit is given without a unit,
        # matching the documented module behavior.
        if (soft_limit or hard_limit) and cap_unit is None:
            cap_unit = 'GB'

        if soft_limit and utils.is_size_negative(soft_limit):
            error_message = "Invalid soft_limit provided, " \
                            "must be greater than or equal to 0"
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)

        if hard_limit and utils.is_size_negative(hard_limit):
            error_message = "Invalid hard_limit provided, " \
                            "must be greater than or equal to 0"
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)

        '''
        Get NAS server Object
        '''

        if nas_server_name is not None:
            if utils.is_input_empty(nas_server_name):
                self.module.fail_json(msg="Invalid nas_server_name given,"
                                          " Please provide a valid name.")
            nas_server_resource = self \
                .get_nas_server_obj(name=nas_server_name)
        elif nas_server_id is not None:
            if utils.is_input_empty(nas_server_id):
                self.module.fail_json(msg="Invalid nas_server_id given,"
                                          " Please provide a valid ID.")
            nas_server_resource = self.get_nas_server_obj(id=nas_server_id)

        '''
        Get filesystem Object
        '''
        if filesystem_name is not None:
            if utils.is_input_empty(filesystem_name):
                self.module.fail_json(msg="Invalid filesystem_name given,"
                                          " Please provide a valid name.")
            filesystem_obj = self \
                .get_filesystem(nas_server=nas_server_resource,
                                name=filesystem_name)
            fs_id = filesystem_obj.id
        elif filesystem_id is not None:
            if utils.is_input_empty(filesystem_id):
                self.module.fail_json(msg="Invalid filesystem_id given,"
                                          " Please provide a valid ID.")
            filesystem_obj = self \
                .get_filesystem(id=filesystem_id)
            if filesystem_obj:
                # NOTE(review): lookup by ID is indexed, unlike the by-name
                # branch above -- presumably get_filesystem(id=...) returns a
                # list-like resource here; confirm against the SDK.
                fs_id = filesystem_obj[0].id
            else:
                self.module.fail_json(msg="Filesystem does not exist.")

        '''
        Validate path to quota tree
        '''
        if path is not None:
            if utils.is_input_empty(path):
                self.module.fail_json(msg=" Please provide a valid path.")
            elif not path.startswith('/'):
                self.module.fail_json(msg="The path is relative to the root of the file system "
                                          "and must start with a forward slash '/'.")

        # NOTE(review): a filesystem is required even when tree_quota_id
        # alone is documented as sufficient for get/delete -- confirm intent.
        if filesystem_id is None and filesystem_name is None:
            self.module.fail_json(msg="Please provide either filesystem_name or filesystem_id.")

        quota_tree_id_present = self.check_quota_tree_is_present(fs_id, path, tree_quota_id)
        tree_quota_id = quota_tree_id_present

        '''
        Create quota tree
        '''

        if (filesystem_id or filesystem_name) and path is not None and state == "present":
            if not tree_quota_id:
                LOG.info("Creating quota tree")
                create_tree_quota_obj = self.create_quota_tree(fs_id, soft_limit, hard_limit,
                                                               cap_unit, path, description)

                if create_tree_quota_obj:
                    tree_quota_id = create_tree_quota_obj.id
                    result['create_tree_quota'] = True

        '''
        Modify quota tree
        '''

        if tree_quota_id and state == "present":
            LOG.info("Modifying quota tree")
            result['modify_tree_quota'] = self.modify_tree_quota(tree_quota_id, soft_limit, hard_limit, cap_unit,
                                                                 description)

        '''
        Delete quota tree
        '''

        if tree_quota_id is not None and state == "absent":
            LOG.info("Deleting quota tree")
            result['delete_tree_quota'] = self.delete_tree_quota(tree_quota_id)

        '''
        Get quota tree details
        '''
        if state == "present" and tree_quota_id is not None:
            result['get_tree_quota_details'] = self.get_filesystem_tree_quota_display_attributes(tree_quota_id)
        else:
            result['get_tree_quota_details'] = {}

        if result['create_tree_quota'] or result['modify_tree_quota'] or result['delete_tree_quota']:
            result['changed'] = True

        self.module.exit_json(**result)
+
+
def get_quota_tree_parameters():
    """Return the argument spec for the ansible quota tree module on Unity."""
    # Template for the plain optional string options; copied per key so each
    # option gets its own dict instance.
    optional_str = {'required': False, 'type': 'str'}
    return {
        'filesystem_id': dict(optional_str),
        'filesystem_name': dict(optional_str),
        'state': {'required': True, 'type': 'str',
                  'choices': ['present', 'absent']},
        'hard_limit': {'required': False, 'type': 'int'},
        'soft_limit': {'required': False, 'type': 'int'},
        'cap_unit': {'required': False, 'type': 'str',
                     'choices': ['MB', 'GB', 'TB']},
        'tree_quota_id': dict(optional_str),
        'nas_server_name': dict(optional_str),
        'nas_server_id': dict(optional_str),
        # no_log: the path may be treated as sensitive by Ansible.
        'path': {'required': False, 'type': 'str', 'no_log': True},
        'description': dict(optional_str),
    }
+
+
def main():
    """Entry point: build a Unity quota tree object and run the
    requested operation from the playbook parameters."""
    QuotaTree().perform_module_operation()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/user_quota.py b/ansible_collections/dellemc/unity/plugins/modules/user_quota.py
new file mode 100644
index 000000000..d9116c3a5
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/user_quota.py
@@ -0,0 +1,1012 @@
+#!/usr/bin/python
+# Copyright: (c) 2021, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing User Quota on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: user_quota
+short_description: Manage user quota on the Unity storage system
+description:
+- Managing User Quota on the Unity storage system includes
+ Create user quota,
+ Get user quota,
+ Modify user quota,
+ Delete user quota,
+ Create user quota for quota tree,
+ Modify user quota for quota tree and
+ Delete user quota for quota tree.
+version_added: '1.2.0'
+extends_documentation_fragment:
+ - dellemc.unity.unity
+author:
+- Spandita Panigrahi (@panigs7) <ansible.team@dell.com>
+options:
+ filesystem_name:
+ description:
+ - The name of the filesystem for which the user quota is created.
+ - For creation of a user quota either I(filesystem_name) or
+ I(filesystem_id) is required.
+ type: str
+ filesystem_id:
+ description:
+ - The ID of the filesystem for which the user quota is created.
+ - For creation of a user quota either I(filesystem_id) or
+ I(filesystem_name) is required.
+ type: str
+ nas_server_name:
+ description:
+ - The name of the NAS server in which the filesystem is created.
+ - For creation of a user quota either I(nas_server_name) or
+ I(nas_server_id) is required.
+ type: str
+ nas_server_id:
+ description:
+ - The ID of the NAS server in which the filesystem is created.
    - For creation of a user quota either I(nas_server_id) or
      I(nas_server_name) is required.
+ type: str
+ hard_limit:
+ description:
+ - Hard limitation for a user on the total space available. If exceeded, user cannot write data.
+ - Value C(0) implies no limit.
+ - One of the values of I(soft_limit) and I(hard_limit) can be C(0), however, both cannot be C(0)
+ during creation or modification of user quota.
+ type: int
+ soft_limit:
+ description:
+ - Soft limitation for a user on the total space available. If exceeded,
+ notification will be sent to the user for the grace period mentioned, beyond
+ which the user cannot use space.
+ - Value C(0) implies no limit.
+ - Both I(soft_limit) and I(hard_limit) cannot be C(0) during creation or modification
+ of user quota.
+ type: int
+ cap_unit:
+ description:
+ - Unit of I(soft_limit) and I(hard_limit) size.
+ - It defaults to C(GB) if not specified.
+ choices: ['MB', 'GB', 'TB']
+ type: str
+ user_type:
+ description:
+ - Type of user creating a user quota.
+ - Mandatory while creating or modifying user quota.
+ choices: ['Unix', 'Windows']
+ type: str
+ win_domain:
+ description:
+ - Fully qualified or short domain name for Windows user type.
+ - Mandatory when I(user_type) is C(Windows).
+ type: str
+ user_name:
+ description:
+ - User name of the user quota when I(user_type) is C(Windows) or C(Unix).
+ - Option I(user_name) must be specified along with I(win_domain) when I(user_type) is C(Windows).
+ type: str
+ uid:
+ description:
+ - User ID of the user quota.
+ type: str
+ user_quota_id:
+ description:
+ - User quota ID generated after creation of a user quota.
+ type: str
+ tree_quota_id:
+ description:
+ - The ID of the quota tree.
+ - Either I(tree_quota_id) or I(path) to quota tree is required to
+ create/modify/delete user quota for a quota tree.
+ type: str
+ path:
+ description:
+ - The path to the quota tree.
+ - Either I(tree_quota_id) or I(path) to quota tree is required to
+ create/modify/delete user quota for a quota tree.
+ - Path must start with a forward slash '/'.
+ type: str
+ state:
+ description:
+ - The I(state) option is used to mention the existence of the user quota.
+ type: str
+ required: true
+ choices: ['absent', 'present']
+
+notes:
+ - The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+ - name: Get user quota details by user quota id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ user_quota_id: "userquota_171798700679_0_123"
+ state: "present"
+
+ - name: Get user quota details by user quota uid/user name
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "fs_2171"
+ nas_server_id: "nas_21"
+ user_name: "test"
+ state: "present"
+
+ - name: Create user quota for a filesystem with filesystem id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ uid: "111"
+ state: "present"
+
+ - name: Create user quota for a filesystem with filesystem name
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "Test_filesystem"
+ nas_server_name: "lglad068"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ uid: "111"
+ state: "present"
+
+ - name: Modify user quota limit usage by user quota id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ user_quota_id: "userquota_171798700679_0_123"
+ hard_limit: 10
+ cap_unit: "TB"
+ soft_limit: 8
+ state: "present"
+
+ - name: Modify user quota by filesystem id and user quota uid/user_name
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ hard_limit: 12
+ cap_unit: "TB"
+ soft_limit: 10
+ state: "present"
+
+ - name: Delete user quota
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ win_domain: "prod"
+ user_name: "sample"
+ state: "absent"
+
+ - name: Create user quota of a quota tree
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_4"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ soft_limit: 9
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Create user quota of a quota tree by quota tree path
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/sample"
+ user_type: "Unix"
+ user_name: "test"
+ hard_limit: 2
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Modify user quota of a quota tree
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_4"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ soft_limit: 10
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Modify user quota of a quota tree by quota tree path
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/sample"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ hard_limit: 12
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Delete user quota of a quota tree by quota tree path
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/sample"
+ win_domain: "prod"
+ user_name: "sample"
+ state: "absent"
+
+ - name: Delete user quota of a quota tree by quota tree id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_4"
+ win_domain: "prod"
+ user_name: "sample"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+
+get_user_quota_details:
+ description: Details of the user quota.
+ returned: When user quota exists
+ type: dict
+ contains:
+ filesystem:
+ description: Filesystem details for which the user quota is
+ created.
+ type: dict
+ contains:
+ UnityFileSystem:
+ description: Filesystem details for which the
+ user quota is created.
+ type: dict
+ contains:
+ id:
+ description: ID of the filesystem for
+ which the user quota is created.
+ type: str
+ name:
+ description: Name of filesystem.
+ type: str
+ nas_server:
+ description: Nasserver details where
+ filesystem is created.
+ type: dict
+ contains:
+ name:
+ description: Name of nasserver.
+ type: str
+ id:
+ description: ID of nasserver.
+ type: str
+ tree_quota:
+ description: Quota tree details for which the user quota is
+ created.
+ type: dict
+ contains:
+ UnityTreeQuota:
+ description: Quota tree details for which the user
+ quota is created.
+ type: dict
+ contains:
+ id:
+ description: ID of the quota tree.
+ type: str
+ path:
+ description: Path to quota tree.
+ type: str
+ gp_left:
+ description: The grace period left after the soft limit
+ for the user quota is exceeded.
+ type: int
+ hard_limit:
+ description: Hard limitation for a user on the total space
+ available. If exceeded, user cannot write data.
+ type: int
+ hard_ratio:
+ description: The hard ratio is the ratio between the
+ hard limit size of the user quota
+ and the amount of storage actually consumed.
+ type: str
+ soft_limit:
+ description: Soft limitation for a user on the total space
+ available. If exceeded, notification will be
+ sent to user for the grace period mentioned, beyond
+ which user cannot use space.
+ type: int
+ soft_ratio:
+ description: The soft ratio is the ratio between
+ the soft limit size of the user quota
+ and the amount of storage actually consumed.
+ type: str
+ id:
+ description: User quota ID.
+ type: str
+ size_used:
+ description: Size of used space in the filesystem
+ by the user files.
+ type: int
+ state:
+ description: State of the user quota.
+ type: int
+ uid:
+ description: User ID of the user.
+ type: int
+ unix_name:
+ description: Unix user name for this user quota's uid.
+ type: str
+ windows_names:
+ description: Windows user name that maps to this quota's uid.
+ type: str
+ windows_sids:
+        description: Windows SIDs that map to this quota's uid.
+ type: str
+ sample: {
+ "existed": true,
+ "filesystem": {
+ "UnityFileSystem": {
+ "hash": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
+ "id": "fs_120",
+ "name": "nfs-multiprotocol",
+ "nas_server": {
+ "id": "nas_1",
+ "name": "lglad072"
+ }
+ }
+ },
+ "gp_left": null,
+ "hard_limit": "10.0 GB",
+ "hard_ratio": null,
+ "hash": 8752448438089,
+ "id": "userquota_171798694698_0_60000",
+ "size_used": 0,
+ "soft_limit": "10.0 GB",
+ "soft_ratio": null,
+ "state": 0,
+ "tree_quota": null,
+ "uid": 60000,
+ "unix_name": null,
+ "windows_names": null,
+ "windows_sids": null
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+# Module-level logger for this Ansible module.
+LOG = utils.get_logger('user_quota')
+
+# Client identification string passed to get_unity_unisphere_connection().
+application_type = "Ansible/1.6.0"
+
+
+class UserQuota(object):
+    """Class with User Quota operations"""
+
+    def __init__(self):
+        """Define all parameters required by this module"""
+        # Common Unisphere connection parameters plus the user-quota
+        # specific parameters defined in get_user_quota_parameters().
+        self.module_params = utils.get_unity_management_host_parameters()
+        self.module_params.update(get_user_quota_parameters())
+
+        # A user quota is addressed either by uid, by a unix/windows user
+        # name, or by its own id, so those identifiers must not be combined.
+        mutually_exclusive = [['user_name', 'uid'], ['uid', 'win_domain'],
+                              ['filesystem_name', 'filesystem_id'],
+                              ['nas_server_name', 'nas_server_id'],
+                              ['user_name', 'user_quota_id'],
+                              ['uid', 'user_quota_id']]
+
+        # Windows users need a domain in addition to the user name.
+        required_if = [('user_type', 'Windows', ['win_domain', 'user_name'], False),
+                       ('user_type', 'Unix', ['user_name'], False)]
+
+        # initialize the Ansible module
+        self.module = AnsibleModule(
+            argument_spec=self.module_params,
+            supports_check_mode=False,
+            mutually_exclusive=mutually_exclusive,
+            required_if=required_if)
+        utils.ensure_required_libs(self.module)
+
+        self.unity_conn = utils.get_unity_unisphere_connection(
+            self.module.params, application_type)
+
+    def check_user_is_present(self, fs_id, uid, unix, win_name, user_quota_id):
+        """
+        Check if user quota is present in filesystem.
+        :param fs_id: ID of filesystem where user quota is searched.
+        :param uid: UID of the user quota
+        :param unix: Unix user name of user quota
+        :param win_name: Windows user name of user quota
+        :param user_quota_id: ID of the user quota
+        :return: ID of user quota if it exists else None.
+        """
+
+        # Without any user identifier there is nothing to look up.
+        if not self.check_user_type_provided(win_name, uid, unix):
+            return None
+
+        user_name_or_uid_or_id = unix if unix else win_name if win_name else uid if \
+            uid else user_quota_id
+
+        # All user quotas in the given filesystem
+        all_user_quota = self.unity_conn.get_user_quota(filesystem=fs_id, id=user_quota_id,
+                                                        unix_name=unix, windows_names=win_name,
+                                                        uid=uid)
+
+        # NOTE(review): index-based iteration; iterating the collection
+        # directly would be more idiomatic (behavior unchanged here).
+        for user_quota in range(len(all_user_quota)):
+
+            # Only filesystem-level quotas are of interest here; quotas that
+            # belong to a quota tree are matched by
+            # check_user_quota_in_quota_tree() instead.
+            if all_user_quota[user_quota].tree_quota is None:
+                msg = "User quota %s with id %s " \
+                      "is present in filesystem %s" \
+                      % (user_name_or_uid_or_id, all_user_quota[user_quota].id, fs_id)
+                LOG.info(msg)
+                return all_user_quota[user_quota].id
+
+        return None
+
+    def check_quota_tree_is_present(self, fs_id, path, tree_quota_id):
+        """
+        Check if quota tree is present in filesystem.
+        :param fs_id: ID of filesystem where quota tree is searched.
+        :param path: Path to quota tree
+        :param tree_quota_id: ID of the quota tree
+        :return: ID of quota tree if it exists.
+        """
+
+        path_or_id = path if path else tree_quota_id
+        tree_quota_obj = self.unity_conn.get_tree_quota(filesystem=fs_id, path=path,
+                                                        id=tree_quota_id)
+        if len(tree_quota_obj) > 0:
+            msg = "Tree quota id %s present in filesystem %s" % (tree_quota_obj[0].id, fs_id)
+            LOG.info(msg)
+            return tree_quota_obj[0].id
+        else:
+            # A missing quota tree is fatal: user quotas on a tree can only
+            # be managed when the tree itself exists.
+            errormsg = "The quota tree '%s' does not exist" % path_or_id
+            LOG.error(errormsg)
+            self.module.fail_json(msg=errormsg)
+
+    def check_user_quota_in_quota_tree(self, tree_quota_id, uid, unix, win_name, user_quota_id):
+        """
+        Check if user quota is present in quota tree.
+        :param tree_quota_id: ID of quota tree where user quota is searched.
+        :param uid: UID of user quota
+        :param unix: Unix name of user quota
+        :param win_name: Windows name of user quota
+        :param user_quota_id: ID of the user quota
+        :return: ID of user quota if it exists in quota tree else None.
+        """
+        # Without any user identifier there is nothing to look up.
+        if not self.check_user_type_provided(win_name, uid, unix):
+            return None
+
+        user_quota_name = uid if uid else unix if unix else win_name \
+            if win_name else user_quota_id
+        user_quota_obj = self.unity_conn.get_user_quota(tree_quota=tree_quota_id,
+                                                        uid=uid, windows_names=win_name,
+                                                        unix_name=unix,
+                                                        id=user_quota_id)
+        if len(user_quota_obj) > 0:
+            msg = "User quota %s is present in quota tree %s " % (user_quota_name, tree_quota_id)
+            LOG.info(msg)
+            return user_quota_obj[0].id
+        else:
+            return None
+
+    def create_user_quota(self, fs_id, soft_limit, hard_limit, unit, uid, unix, win_name, tree_quota_id):
+        """
+        Create user quota of a filesystem.
+        :param fs_id: ID of filesystem where user quota is to be created.
+        :param soft_limit: Soft limit
+        :param hard_limit: Hard limit
+        :param unit: Unit of soft limit and hard limit
+        :param uid: UID of the user quota
+        :param unix: Unix user name of user quota
+        :param win_name: Windows user name of user quota
+        :param tree_quota_id: ID of tree quota
+        :return: Object containing new user quota details.
+        """
+
+        unix_or_uid_or_win = uid if uid else unix if unix else win_name
+        fs_id_or_tree_quota_id = fs_id if fs_id else tree_quota_id
+        # At least one limit is mandatory for creation (0 means unlimited).
+        # NOTE(review): "atleast" in the user-facing message below should be
+        # "at least" (string left unchanged here).
+        if soft_limit is None and hard_limit is None:
+            errormsg = "Both soft limit and hard limit cannot be empty. " \
+                       "Please provide atleast one to create user quota."
+            LOG.error(errormsg)
+            self.module.fail_json(msg=errormsg)
+
+        soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit)
+        hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit)
+        try:
+            # If no user identifier was supplied, nothing is created and the
+            # method implicitly returns None.
+            if self.check_user_type_provided(win_name, uid, unix):
+                obj_user_quota = self.unity_conn.create_user_quota(filesystem_id=fs_id,
+                                                                   hard_limit=hard_limit_in_bytes,
+                                                                   soft_limit=soft_limit_in_bytes,
+                                                                   uid=uid, unix_name=unix,
+                                                                   win_name=win_name,
+                                                                   tree_quota_id=tree_quota_id)
+                LOG.info("Successfully created user quota")
+                return obj_user_quota
+
+        except Exception as e:
+            errormsg = "Create quota for user {0} on {1} , failed with error {2} "\
+                .format(unix_or_uid_or_win, fs_id_or_tree_quota_id, str(e))
+            LOG.error(errormsg)
+            self.module.fail_json(msg=errormsg)
+
+    def get_filesystem_user_quota_display_attributes(self, user_quota_id):
+        """Get display user quota attributes
+        :param user_quota_id: User quota ID
+        :return: User quota dict to display
+        """
+        try:
+            user_quota_obj = self.unity_conn.get_user_quota(user_quota_id)
+            user_quota_details = user_quota_obj._get_properties()
+
+            if user_quota_obj and user_quota_obj.existed:
+                # Render the raw byte limits in a human readable unit.
+                user_quota_details['soft_limit'] = utils. \
+                    convert_size_with_unit(int(user_quota_details['soft_limit']))
+                user_quota_details['hard_limit'] = utils. \
+                    convert_size_with_unit(int(user_quota_details['hard_limit']))
+
+                # Enrich the embedded filesystem entry with its name and
+                # owning NAS server.
+                user_quota_details['filesystem']['UnityFileSystem']['name'] = \
+                    user_quota_obj.filesystem.name
+                user_quota_details['filesystem']['UnityFileSystem'].update(
+                    {'nas_server': {'name': user_quota_obj.filesystem.nas_server.name,
+                                    'id': user_quota_obj.filesystem.nas_server.id}})
+
+                if user_quota_obj.tree_quota:
+                    user_quota_details['tree_quota']['UnityTreeQuota']['path'] = \
+                        user_quota_obj.tree_quota.path
+
+                return user_quota_details
+            else:
+                errormsg = "User quota does not exist."
+                LOG.error(errormsg)
+                self.module.fail_json(msg=errormsg)
+
+        except Exception as e:
+            # NOTE(review): if get_user_quota() itself raised above,
+            # user_quota_obj is unbound here and user_quota_obj.id would
+            # raise NameError inside this handler -- confirm and harden.
+            errormsg = "Failed to display the details of user quota {0} with " \
+                       "error {1}".format(user_quota_obj.id, str(e))
+            LOG.error(errormsg)
+            self.module.fail_json(msg=errormsg)
+
+    def get_filesystem(self, nas_server=None, name=None, id=None):
+        """
+        Get filesystem details.
+        :param nas_server: Nas server object.
+        :param name: Name of filesystem.
+        :param id: ID of filesystem.
+        :return: Object containing filesystem details if it exists.
+        """
+        id_or_name = id if id else name
+        try:
+            obj_fs = None
+            if name:
+                # Filesystem names are only unique per NAS server, so a
+                # name-based lookup requires the NAS server too.
+                if not nas_server:
+                    err_msg = "NAS Server is required to get the FileSystem."
+                    LOG.error(err_msg)
+                    self.module.fail_json(msg=err_msg)
+                obj_fs = self.unity_conn.get_filesystem(name=name,
+                                                        nas_server=nas_server)
+                if obj_fs and obj_fs.existed:
+                    LOG.info("Successfully got the filesystem object %s.",
+                             obj_fs)
+                    return obj_fs
+            if id:
+                if nas_server:
+                    obj_fs = self.unity_conn \
+                        .get_filesystem(id=id, nas_server=nas_server)
+                else:
+                    obj_fs = self.unity_conn.get_filesystem(id=id)
+                if obj_fs and obj_fs.existed:
+                    LOG.info("Successfully got the filesystem object %s.",
+                             obj_fs)
+                    return obj_fs
+        except Exception as e:
+            error_msg = "Failed to get filesystem %s with error %s." \
+                        % (id_or_name, str(e))
+            LOG.error(error_msg)
+            self.module.fail_json(msg=error_msg)
+
+    def get_nas_server_obj(self, name=None, id=None):
+        """
+        Get nas server details.
+        :param name: Nas server name.
+        :param id: Nas server ID.
+        :return: Object containing nas server details if it exists.
+        """
+        nas_server = id if id else name
+        error_msg = ("Failed to get NAS server %s." % nas_server)
+        try:
+            obj_nas = self.unity_conn.get_nas_server(_id=id, name=name)
+            if name and obj_nas.existed:
+                LOG.info("Successfully got the NAS server object %s.",
+                         obj_nas)
+                return obj_nas
+            elif id and obj_nas.existed:
+                LOG.info("Successfully got the NAS server object %s.",
+                         obj_nas)
+                return obj_nas
+            else:
+                LOG.error(error_msg)
+                self.module.fail_json(msg=error_msg)
+        except Exception as e:
+            error_msg = "Failed to get NAS server %s with error %s." \
+                        % (nas_server, str(e))
+            LOG.error(error_msg)
+            self.module.fail_json(msg=error_msg)
+
+    def modify_user_quota(self, user_quota_id, soft_limit, hard_limit, unit):
+        """
+        Modify user quota of filesystem by its uid/username/user quota id.
+        :param user_quota_id: ID of the user quota
+        :param soft_limit: Soft limit
+        :param hard_limit: Hard limit
+        :param unit: Unit of soft limit and hard limit
+        :return: Boolean value whether modify user quota operation is successful.
+        """
+
+        # Nothing requested -> nothing to change.
+        if soft_limit is None and hard_limit is None:
+            return False
+
+        user_quota_obj = self.unity_conn.get_user_quota(user_quota_id)._get_properties()
+
+        # A limit left as None keeps the quota's current value.
+        if soft_limit is None:
+            soft_limit_in_bytes = user_quota_obj['soft_limit']
+        else:
+            soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit)
+
+        if hard_limit is None:
+            hard_limit_in_bytes = user_quota_obj['hard_limit']
+        else:
+            hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit)
+
+        # Idempotency: no API call when both limits already match.
+        # NOTE(review): the else branch is reached only if _get_properties()
+        # returned an empty/falsy value -- confirm that is how a missing
+        # quota manifests here.
+        if user_quota_obj:
+            if user_quota_obj['soft_limit'] == soft_limit_in_bytes and \
+                    user_quota_obj['hard_limit'] == hard_limit_in_bytes:
+                return False
+        else:
+            error_msg = "The user quota does not exist."
+            LOG.error(error_msg)
+            self.module.fail_json(msg=error_msg)
+
+        try:
+            obj_user_quota = self.unity_conn.modify_user_quota(user_quota_id=user_quota_id,
+                                                               hard_limit=hard_limit_in_bytes,
+                                                               soft_limit=soft_limit_in_bytes)
+            LOG.info("Successfully modified user quota")
+            if obj_user_quota:
+                return True
+        except Exception as e:
+            errormsg = "Modify user quota {0} failed" \
+                       " with error {1}".format(user_quota_id, str(e))
+            LOG.error(errormsg)
+            self.module.fail_json(msg=errormsg)
+
+    def check_user_type_provided(self, win_name, uid, unix_name):
+        """Checks if user type or uid is provided
+        :param win_name: Windows name of user quota
+        :param uid: UID of user quota
+        :param unix_name: Unix name of user quota
+        :return: True when at least one user identifier is given."""
+        if win_name is None and uid is None and unix_name is None:
+            return False
+        else:
+            return True
+
+    def perform_module_operation(self):
+        """
+        Perform different actions on user quota module based on parameters
+        passed in the playbook
+        """
+        filesystem_id = self.module.params['filesystem_id']
+        filesystem_name = self.module.params['filesystem_name']
+        nas_server_name = self.module.params['nas_server_name']
+        nas_server_id = self.module.params['nas_server_id']
+        cap_unit = self.module.params['cap_unit']
+        state = self.module.params['state']
+        user_quota_id = self.module.params['user_quota_id']
+        hard_limit = self.module.params['hard_limit']
+        soft_limit = self.module.params['soft_limit']
+        user_type = self.module.params['user_type']
+        uid = self.module.params['uid']
+        user_name = self.module.params['user_name']
+        win_domain = self.module.params['win_domain']
+        tree_quota_id = self.module.params['tree_quota_id']
+        path = self.module.params['path']
+        create_user_quota_obj = None
+        win_name = None
+        unix_name = None
+        nas_server_resource = None
+        fs_id = None
+        user_quota_details = ''
+        filesystem_obj = None
+
+        '''
+        result is a dictionary to contain end state and user quota details
+        '''
+        result = dict(
+            changed=False,
+            create_user_quota=False,
+            modify_user_quota=False,
+            get_user_quota_details={},
+            delete_user_quota=False
+        )
+
+        # Default the size unit when any limit is given without one.
+        if (soft_limit or hard_limit) and cap_unit is None:
+            cap_unit = 'GB'
+
+        # A limit of 0 means "unlimited"; both unlimited is rejected because
+        # Unity treats that as quota removal (see the delete path below).
+        if soft_limit == 0 and hard_limit == 0:
+            error_message = 'Both soft limit and hard limit cannot be unlimited'
+            LOG.error(error_message)
+            self.module.fail_json(msg=error_message)
+
+        if soft_limit and utils.is_size_negative(soft_limit):
+            error_message = "Invalid soft_limit provided, " \
+                            "must be greater than 0"
+            LOG.error(error_message)
+            self.module.fail_json(msg=error_message)
+
+        if hard_limit and utils.is_size_negative(hard_limit):
+            error_message = "Invalid hard_limit provided, " \
+                            "must be greater than 0"
+            LOG.error(error_message)
+            self.module.fail_json(msg=error_message)
+
+        if (user_type or uid) and filesystem_id is None and \
+                filesystem_name is None and tree_quota_id is None:
+            error_message = 'Please provide either ' \
+                            'filesystem_name or filesystem_id'
+            LOG.error(error_message)
+            self.module.fail_json(msg=error_message)
+
+        if (nas_server_name or nas_server_id) \
+                and (filesystem_id is None and filesystem_name is None):
+            error_message = 'Please provide either ' \
+                            'filesystem_name or filesystem_id'
+            LOG.error(error_message)
+            self.module.fail_json(msg=error_message)
+
+        '''
+        Validate path to quota tree
+        '''
+        if path is not None:
+            if utils.is_input_empty(path):
+                self.module.fail_json(msg=" Please provide a valid path.")
+            elif not path.startswith('/'):
+                self.module.fail_json(msg="The path is relative to the root of the file system "
+                                          "and must start with a forward slash.")
+
+        # NOTE(review): this unconditional check requires a filesystem for
+        # every operation, which makes the tree_quota_id-specific branch
+        # right below unreachable -- confirm intent. Also "fileystem_id" in
+        # the user-facing message is a typo (string left unchanged here).
+        if filesystem_id is None and filesystem_name is None:
+            self.module.fail_json(msg="Please provide either filesystem_name or fileystem_id.")
+
+        if user_type and filesystem_id is None and filesystem_name is None and tree_quota_id is None:
+            error_message = 'Please provide either ' \
+                            'filesystem_name or filesystem_id to create user quota for a' \
+                            'filesystem. Or provide tree_quota_id to create user quota for a quota tree.'
+            LOG.error(error_message)
+            self.module.fail_json(msg=error_message)
+
+        '''
+        Get NAS server Object
+        '''
+
+        if nas_server_name is not None:
+            if utils.is_input_empty(nas_server_name):
+                self.module.fail_json(msg="Invalid nas_server_name given,"
+                                          " Please provide a valid name.")
+            nas_server_resource = self \
+                .get_nas_server_obj(name=nas_server_name)
+        elif nas_server_id is not None:
+            if utils.is_input_empty(nas_server_id):
+                self.module.fail_json(msg="Invalid nas_server_id given,"
+                                          " Please provide a valid ID.")
+            nas_server_resource = self.get_nas_server_obj(id=nas_server_id)
+
+        '''
+        Get filesystem Object
+        '''
+        if filesystem_name is not None:
+            if utils.is_input_empty(filesystem_name):
+                self.module.fail_json(msg="Invalid filesystem_name given,"
+                                          " Please provide a valid name.")
+            filesystem_obj = self \
+                .get_filesystem(nas_server=nas_server_resource,
+                                name=filesystem_name)
+            fs_id = filesystem_obj.id
+        elif filesystem_id is not None:
+            if utils.is_input_empty(filesystem_id):
+                self.module.fail_json(msg="Invalid filesystem_id given,"
+                                          " Please provide a valid ID.")
+            filesystem_obj = self \
+                .get_filesystem(id=filesystem_id)
+            if filesystem_obj:
+                filesystem_obj = filesystem_obj[0]
+                fs_id = filesystem_obj.id
+            else:
+                self.module.fail_json(msg="Filesystem does not exist.")
+
+        if (user_name or win_domain) and (soft_limit or hard_limit) \
+                and user_type is None:
+            self.module.fail_json(msg="Invalid user_type given,"
+                                      " Please provide a valid user_type.")
+
+        # Check the sharing protocol supported by the filesystem
+        # while creating a user quota
+        if filesystem_obj and (soft_limit is not None or hard_limit is not None):
+            supported_protocol = filesystem_obj.supported_protocols
+
+            if supported_protocol == utils.FSSupportedProtocolEnum["CIFS"] \
+                    and (user_type == "Unix" or uid):
+                self.module.fail_json(msg="This filesystem supports only SMB protocol "
+                                          "and applicable only for windows users. "
+                                          "Please provide valid windows details.")
+            elif supported_protocol == utils.FSSupportedProtocolEnum["NFS"] \
+                    and user_type == "Windows":
+                self.module.fail_json(msg="This filesystem supports only NFS protocol "
+                                          "and applicable only for unix users. "
+                                          "Please provide valid uid or unix details.")
+
+        '''
+        Validate user type or uid
+        '''
+        # NOTE(review): the condition also rejects non-numeric uids, but the
+        # message only mentions "empty" -- consider clarifying the string.
+        if uid and (utils.is_input_empty(uid) or not uid.isnumeric()):
+            self.module.fail_json(msg=" UID is empty. Please provide valid UID.")
+        if user_type:
+            if user_type == "Unix":
+                if user_name is None or utils.is_input_empty(user_name):
+                    self.module.fail_json(msg=" 'user_name' is empty. Please provide valid user_name.")
+
+            if user_type == "Windows":
+                if win_domain is None or utils.is_input_empty(win_domain):
+                    self.module.fail_json(msg=" 'win_domain' is empty. Please provide valid win_domain.")
+                elif user_name is None or utils.is_input_empty(user_name):
+                    self.module.fail_json(msg=" 'user_name' is empty. Please provide valid user_name.")
+
+        # Build the DOMAIN\\user form expected by Unity.
+        # NOTE(review): if win_domain is set without user_type/user_name the
+        # concatenation below would fail on user_name=None -- confirm the
+        # earlier validations really exclude that combination.
+        if user_type != "Unix" and win_domain:
+            win_domain = win_domain.replace(".com", "")
+            win_name = win_domain + '\\' + user_name
+
+        if win_name is None and user_name:
+            unix_name = user_name
+
+        '''
+        Check if quota tree is already present in the filesystem
+        '''
+        if tree_quota_id or path:
+            quota_tree_id_present = self.check_quota_tree_is_present(fs_id, path, tree_quota_id)
+            tree_quota_id = quota_tree_id_present
+
+        '''
+        Check if the user quota is already present in the filesystem/ quota tree
+        '''
+        if tree_quota_id:
+            user_id_present = self.check_user_quota_in_quota_tree(tree_quota_id, uid, unix_name, win_name,
+                                                                  user_quota_id)
+            # Once a quota tree is targeted, the filesystem filter is
+            # cleared so creation happens against the tree only.
+            fs_id = None if tree_quota_id is not None else fs_id
+        else:
+            user_id_present = self.check_user_is_present(fs_id, uid, unix_name, win_name, user_quota_id)
+
+        if user_id_present:
+            user_quota_id = user_id_present
+
+        if state == "present":
+            if user_quota_id:
+                # Modify user quota. If no change modify_user_quota is false.
+                result['modify_user_quota'] = self.modify_user_quota(user_quota_id, soft_limit,
+                                                                     hard_limit, cap_unit)
+
+            else:
+                LOG.info("Creating user quota")
+                create_user_quota_obj = self.create_user_quota(fs_id, soft_limit, hard_limit,
+                                                               cap_unit, uid, unix_name, win_name,
+                                                               tree_quota_id)
+                if create_user_quota_obj:
+                    user_quota_id = create_user_quota_obj.id
+                    result['create_user_quota'] = True
+                else:
+                    user_quota_id = None
+        '''
+        Deleting user quota.
+        When both soft limit and hard limit are set to 0, it implies the user quota has
+        unlimited quota. Thereby, Unity removes the user quota id.
+        '''
+
+        if state == "absent" and user_quota_id:
+            soft_limit = 0
+            hard_limit = 0
+            err_msg = "Deleting user quota %s" % user_quota_id
+            LOG.info(err_msg)
+            result['delete_user_quota'] = self.modify_user_quota(user_quota_id,
+                                                                 soft_limit, hard_limit, cap_unit)
+        '''
+        Get user details
+        '''
+
+        if state == "present" and user_quota_id:
+            user_quota_details = self.get_filesystem_user_quota_display_attributes(user_quota_id)
+
+        result['get_user_quota_details'] = user_quota_details
+        if result['create_user_quota'] or result['modify_user_quota'] or result['delete_user_quota']:
+            result['changed'] = True
+
+        self.module.exit_json(**result)
+
+
+def get_user_quota_parameters():
+    """Build the argument spec for the Unity filesystem user quota module.
+
+    Returns a dict mapping each module option to its Ansible argument
+    definition (type, required flag, choices, no_log).
+    """
+    spec = {}
+    # Plain optional string options share an identical definition.
+    for option in ('filesystem_id', 'filesystem_name', 'user_name', 'uid',
+                   'win_domain', 'user_quota_id', 'nas_server_name',
+                   'nas_server_id', 'tree_quota_id'):
+        spec[option] = {'required': False, 'type': 'str'}
+    # Options with choices, non-string types or special handling.
+    spec['state'] = {'required': True, 'type': 'str',
+                     'choices': ['present', 'absent']}
+    spec['user_type'] = {'required': False, 'type': 'str',
+                         'choices': ['Windows', 'Unix']}
+    spec['hard_limit'] = {'required': False, 'type': 'int'}
+    spec['soft_limit'] = {'required': False, 'type': 'int'}
+    spec['cap_unit'] = {'required': False, 'type': 'str',
+                        'choices': ['MB', 'GB', 'TB']}
+    # no_log: quota tree paths are kept out of the task log.
+    spec['path'] = {'required': False, 'type': 'str', 'no_log': True}
+    return spec
+
+
+def main():
+    """Module entry point.
+
+    Builds the UserQuota handler and runs the requested operation; the
+    handler terminates the process through AnsibleModule's
+    exit_json/fail_json.
+    """
+    UserQuota().perform_module_operation()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/volume.py b/ansible_collections/dellemc/unity/plugins/modules/volume.py
new file mode 100644
index 000000000..82bcb0174
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/volume.py
@@ -0,0 +1,1277 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing volumes on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+
+module: volume
+version_added: '1.1.0'
+short_description: Manage volume on Unity storage system
+description:
+- Managing volume on Unity storage system includes-
+ Create new volume,
+ Modify volume attributes,
+ Map Volume to host,
+ Unmap volume to host,
+ Display volume details,
+ Delete volume.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Arindam Datta (@arindam-emc) <ansible.team@dell.com>
+- Pavan Mudunuri(@Pavan-Mudunuri) <ansible.team@dell.com>
+
+options:
+ vol_name:
+ description:
+ - The name of the volume. Mandatory only for create operation.
+ type: str
+ vol_id:
+ description:
+ - The id of the volume.
+ - It can be used only for get, modify, map/unmap host, or delete operation.
+ type: str
+ pool_name:
+ description:
+ - This is the name of the pool where the volume will be created.
+ - Either the I(pool_name) or I(pool_id) must be provided to create a new volume.
+ type: str
+ pool_id:
+ description:
+ - This is the id of the pool where the volume will be created.
+ - Either the I(pool_name) or I(pool_id) must be provided to create a new volume.
+ type: str
+ size:
+ description:
+ - The size of the volume.
+ type: int
+ cap_unit:
+ description:
+ - The unit of the volume size. It defaults to C(GB), if not specified.
+ choices: ['GB' , 'TB']
+ type: str
+ description:
+ description:
+ - Description about the volume.
+ - Description can be removed by passing empty string ("").
+ type: str
+ snap_schedule:
+ description:
+ - Snapshot schedule assigned to the volume.
+ - Add/Remove/Modify the snapshot schedule for the volume.
+ type: str
+ compression:
+ description:
+ - Boolean variable, Specifies whether or not to enable compression.
+ Compression is supported only for thin volumes.
+ type: bool
+ advanced_dedup:
+ description:
+ - Boolean variable, Indicates whether or not to enable advanced deduplication.
+ - Compression should be enabled to enable advanced deduplication.
+ - It can only be enabled on the all flash high end platforms.
+ - Deduplicated data will remain as is even after advanced deduplication is disabled.
+ type: bool
+ is_thin:
+ description:
+ - Boolean variable, Specifies whether or not it is a thin volume.
+ - The value is set as C(true) by default if not specified.
+ type: bool
+ sp:
+ description:
+ - Storage Processor for this volume.
+ choices: ['SPA' , 'SPB']
+ type: str
+ io_limit_policy:
+ description:
+ - IO limit policy associated with this volume.
+      Once it is set, it cannot be removed through the Ansible module but it
+      can be changed.
+ type: str
+ host_name:
+ description:
+ - Name of the host to be mapped/unmapped with this volume.
+ - Either I(host_name) or I(host_id) can be specified in one task along with
+ I(mapping_state).
+ type: str
+ host_id:
+ description:
+ - ID of the host to be mapped/unmapped with this volume.
+ - Either I(host_name) or I(host_id) can be specified in one task along with
+ I(mapping_state).
+ type: str
+ hlu:
+ description:
+ - Host Lun Unit to be mapped/unmapped with this volume.
+ - It is an optional parameter, hlu can be specified along
+ with I(host_name) or I(host_id) and I(mapping_state).
+ - If I(hlu) is not specified, unity will choose it automatically.
+ The maximum value supported is C(255).
+ type: int
+ mapping_state:
+ description:
+ - State of host access for volume.
+ choices: ['mapped' , 'unmapped']
+ type: str
+ new_vol_name:
+ description:
+ - New name of the volume for rename operation.
+ type: str
+ tiering_policy:
+ description:
+ - Tiering policy choices for how the storage resource data will be
+ distributed among the tiers available in the pool.
+ choices: ['AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']
+ type: str
+ state:
+ description:
+ - State variable to determine whether volume will exist or not.
+ choices: ['absent', 'present']
+ required: true
+ type: str
+ hosts:
+ description:
+ - Name of hosts for mapping to a volume.
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ hlu:
+ description:
+ - Host Lun Unit to be mapped/unmapped with this volume.
+ - It is an optional parameter, I(hlu) can be specified along
+ with I(host_name) or I(host_id) and I(mapping_state).
+ - If I(hlu) is not specified, unity will choose it automatically.
+ The maximum value supported is C(255).
+ type: str
+
+notes:
+ - The I(check_mode) is not supported.
+"""
+
+EXAMPLES = r"""
+- name: Create Volume
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ description: "{{description}}"
+ pool_name: "{{pool}}"
+ size: 2
+ cap_unit: "{{cap_GB}}"
+ is_thin: True
+ compression: True
+ advanced_dedup: True
+ state: "{{state_present}}"
+
+- name: Expand Volume by volume id
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_id: "{{vol_id}}"
+ size: 5
+ cap_unit: "{{cap_GB}}"
+ state: "{{state_present}}"
+
+- name: Modify Volume, map host by host_name
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ host_name: "{{host_name}}"
+ hlu: 5
+ mapping_state: "{{state_mapped}}"
+ state: "{{state_present}}"
+
+- name: Modify Volume, unmap host mapping by host_name
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ host_name: "{{host_name}}"
+ mapping_state: "{{state_unmapped}}"
+ state: "{{state_present}}"
+
+- name: Map multiple hosts to a Volume
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_id: "{{vol_id}}"
+ hosts:
+ - host_name: "10.226.198.248"
+ hlu: 1
+ - host_id: "Host_929"
+ hlu: 2
+ mapping_state: "mapped"
+ state: "present"
+
+- name: Modify Volume attributes
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ new_vol_name: "{{new_vol_name}}"
+ tiering_policy: "AUTOTIER"
+ compression: True
+ is_thin: True
+ advanced_dedup: True
+ state: "{{state_present}}"
+
+- name: Delete Volume by vol name
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ state: "{{state_absent}}"
+
+- name: Delete Volume by vol id
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_id: "{{vol_id}}"
+ state: "{{state_absent}}"
+"""
+
+RETURN = r'''
+
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+
+volume_details:
+ description: Details of the volume.
+ returned: When volume exists
+ type: dict
+ contains:
+ id:
+ description: The system generated ID given to the volume.
+ type: str
+ name:
+ description: Name of the volume.
+ type: str
+ description:
+ description: Description about the volume.
+ type: str
+ is_data_reduction_enabled:
+ description: Whether or not compression enabled on this volume.
+ type: bool
+ size_total_with_unit:
+ description: Size of the volume with actual unit.
+ type: str
+ snap_schedule:
+ description: Snapshot schedule applied to this volume.
+ type: dict
+ tiering_policy:
+ description: Tiering policy applied to this volume.
+ type: str
+ current_sp:
+ description: Current storage processor for this volume.
+ type: str
+ pool:
+ description: The pool in which this volume is allocated.
+ type: dict
+ host_access:
+ description: Host mapped to this volume.
+ type: list
+ io_limit_policy:
+ description: IO limit policy associated with this volume.
+ type: dict
+ wwn:
+ description: The world wide name of this volume.
+ type: str
+ is_thin_enabled:
+ description: Indicates whether thin provisioning is enabled for this
+ volume.
+ type: bool
+ sample: {
+ "current_node": "NodeEnum.SPB",
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "default_node": "NodeEnum.SPB",
+ "description": null,
+ "effective_io_limit_max_iops": null,
+ "effective_io_limit_max_kbps": null,
+ "existed": true,
+ "family_base_lun": {
+ "UnityLun": {
+ "hash": 8774954523796,
+ "id": "sv_27"
+ }
+ },
+ "family_clone_count": 0,
+ "hash": 8774954522426,
+ "health": {
+ "UnityHealth": {
+ "hash": 8774954528278
+ }
+ },
+ "host_access": [
+ {
+ "accessMask": "PRODUCTION",
+ "hlu": 0,
+ "id": "Host_75",
+ "name": "10.226.198.250"
+ }
+ ],
+ "id": "sv_27",
+ "io_limit_policy": null,
+ "is_advanced_dedup_enabled": false,
+ "is_compression_enabled": null,
+ "is_data_reduction_enabled": false,
+ "is_replication_destination": false,
+ "is_snap_schedule_paused": false,
+ "is_thin_clone": false,
+ "is_thin_enabled": false,
+ "metadata_size": 4294967296,
+ "metadata_size_allocated": 4026531840,
+ "name": "VSI-UNITY-test-task",
+ "per_tier_size_used": [
+ 111400714240,
+ 0,
+ 0
+ ],
+ "pool": {
+ "id": "pool_3",
+ "name": "Extreme_Perf_tier"
+ },
+ "size_allocated": 107374182400,
+ "size_total": 107374182400,
+ "size_total_with_unit": "100.0 GB",
+ "size_used": null,
+ "snap_count": 0,
+ "snap_schedule": null,
+ "snap_wwn": "60:06:01:60:5C:F0:50:00:94:3E:91:4D:51:5A:4F:97",
+ "snaps_size": 0,
+ "snaps_size_allocated": 0,
+ "storage_resource": {
+ "UnityStorageResource": {
+ "hash": 8774954518887
+ }
+ },
+ "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH",
+ "type": "LUNTypeEnum.VMWARE_ISCSI",
+ "wwn": "60:06:01:60:5C:F0:50:00:00:B5:95:61:2E:34:DB:B2"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+import logging
+
LOG = utils.get_logger('volume')  # module-level logger shared by all methods

# Client identification string reported to Unisphere with every request.
application_type = "Ansible/1.6.0"
+
+
def is_none_or_empty_string(param):

    """ Return True when the given value is None, otherwise falsy, or
    renders as an empty string.
    """
    return not param or not str(param)
+
+
class Volume(object):

    """Class with volume operations"""

    # Caches of resolved module parameters, shared across helper methods.
    # param_io_limit_pol_id / param_snap_schedule_name are assigned in
    # perform_module_operation().
    # NOTE(review): param_host_id is never reassigned anywhere in this
    # module's visible code, so it appears to always remain None -- confirm.
    param_host_id = None
    param_io_limit_pol_id = None
    param_snap_schedule_name = None
+
    def __init__(self):
        """Define all parameters required by this module"""
        # Start from the common Unisphere connection parameters and add
        # the volume-specific ones.
        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_volume_parameters())

        # A volume, pool or host may be addressed by name or id, never both.
        mutually_exclusive = [['vol_name', 'vol_id'],
                              ['pool_name', 'pool_id'],
                              ['host_name', 'host_id']]

        required_one_of = [['vol_name', 'vol_id']]

        # initialize the Ansible module
        self.module = AnsibleModule(
            argument_spec=self.module_params,
            supports_check_mode=False,
            mutually_exclusive=mutually_exclusive,
            required_one_of=required_one_of)
        utils.ensure_required_libs(self.module)

        self.unity_conn = utils.get_unity_unisphere_connection(
            self.module.params, application_type)
+
    def get_volume(self, vol_name=None, vol_id=None):
        """Get the details of a volume.
        :param vol_name: The name of the volume
        :param vol_id: The id of the volume
        :return: instance of the respective volume if exist, else None.
        """

        id_or_name = vol_id if vol_id else vol_name
        errormsg = "Failed to get the volume {0} with error {1}"

        try:

            obj_vol = self.unity_conn.get_lun(name=vol_name, _id=vol_id)

            # A lookup by id returns an object even for a missing LUN, so
            # the 'existed' flag must be checked; a lookup by name raises
            # instead, so the object can be returned directly.
            if vol_id and obj_vol.existed:
                LOG.info("Successfully got the volume object %s ", obj_vol)
                return obj_vol
            elif vol_name:
                LOG.info("Successfully got the volume object %s ", obj_vol)
                return obj_vol
            else:
                LOG.info("Failed to get the volume %s", id_or_name)
                return None

        except utils.HttpError as e:
            # 401 indicates bad credentials; surface that explicitly.
            if e.http_status == 401:
                cred_err = "Incorrect username or password , {0}".format(
                    e.message)
                msg = errormsg.format(id_or_name, cred_err)
                self.module.fail_json(msg=msg)
            else:
                msg = errormsg.format(id_or_name, str(e))
                self.module.fail_json(msg=msg)

        except utils.UnityResourceNotFoundError as e:
            # A missing volume is not fatal here; the caller decides
            # whether to create it or to fail.
            msg = errormsg.format(id_or_name, str(e))
            LOG.error(msg)
            return None

        except Exception as e:
            msg = errormsg.format(id_or_name, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)
+
    def get_host(self, host_name=None, host_id=None):
        """Get the instance of a host.
        :param host_name: The name of the host
        :param host_id: The id of the host
        :return: instance of the respective host if exist.
        """

        id_or_name = host_id if host_id else host_name
        errormsg = "Failed to get the host {0} with error {1}"

        try:

            obj_host = self.unity_conn.get_host(name=host_name, _id=host_id)

            # Lookups by id always return an object, so check 'existed';
            # lookups by name raise on a missing host instead.
            if host_id and obj_host.existed:
                LOG.info("Successfully got the host object %s ", obj_host)
                return obj_host
            elif host_name:
                LOG.info("Successfully got the host object %s ", obj_host)
                return obj_host
            else:
                # Unlike get_volume(), a missing host always fails the task.
                msg = "Failed to get the host {0}".format(id_or_name)
                LOG.error(msg)
                self.module.fail_json(msg=msg)

        except Exception as e:

            msg = errormsg.format(id_or_name, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)
+
    def get_snap_schedule(self, name):
        """Get the instance of a snapshot schedule.
        :param name: The name of the snapshot schedule
        :return: snapshot schedule list object containing the match if it
                 exists; otherwise the task is failed.
        """

        errormsg = "Failed to get the snapshot schedule {0} with error {1}"

        try:
            LOG.debug("Attempting to get Snapshot Schedule with name %s",
                      name)
            # The list query returns a UnitySnapScheduleList; a missing
            # schedule simply comes back as an empty list.
            obj_ss = utils.UnitySnapScheduleList.get(self.unity_conn._cli,
                                                     name=name)
            if obj_ss and (len(obj_ss) > 0):
                LOG.info("Successfully got Snapshot Schedule %s", obj_ss)
                return obj_ss
            else:
                msg = "Failed to get snapshot schedule " \
                      "with name {0}".format(name)
                LOG.error(msg)
                self.module.fail_json(msg=msg)

        except Exception as e:
            msg = errormsg.format(name, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)
+
+ def get_io_limit_policy(self, name=None, id=None):
+ """Get the instance of a io limit policy.
+ :param name: The io limit policy name
+ :param id: The io limit policy id
+ :return: instance of the respective io_limit_policy if exist.
+ """
+
+ errormsg = "Failed to get the io limit policy {0} with error {1}"
+ id_or_name = name if name else id
+
+ try:
+ obj_iopol = self.unity_conn.get_io_limit_policy(_id=id, name=name)
+ if id and obj_iopol.existed:
+ LOG.info("Successfully got the IO limit policy object %s",
+ obj_iopol)
+ return obj_iopol
+ elif name:
+ LOG.info("Successfully got the IO limit policy object %s ",
+ obj_iopol)
+ return obj_iopol
+ else:
+ msg = "Failed to get the io limit policy with {0}".format(
+ id_or_name)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = errormsg.format(name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
    def get_pool(self, pool_name=None, pool_id=None):
        """Get the instance of a pool.
        :param pool_name: The name of the pool
        :param pool_id: The id of the pool
        :return: pool object if it exists; otherwise the task is failed.
        """

        id_or_name = pool_id if pool_id else pool_name
        errormsg = "Failed to get the pool {0} with error {1}"

        try:
            obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id)

            # A lookup by id returns an object even for a missing pool, so
            # the 'existed' flag must be checked; a lookup by name raises
            # instead, so the object can be returned directly.
            if pool_id and obj_pool.existed:
                LOG.info("Successfully got the pool object %s",
                         obj_pool)
                return obj_pool
            if pool_name:
                LOG.info("Successfully got pool %s", obj_pool)
                return obj_pool
            else:
                msg = "Failed to get the pool with " \
                      "{0}".format(id_or_name)
                LOG.error(msg)
                self.module.fail_json(msg=msg)

        except Exception as e:
            msg = errormsg.format(id_or_name, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)
+
+ def get_node_enum(self, sp):
+ """Get the storage processor enum.
+ :param sp: The storage processor string
+ :return: storage processor enum
+ """
+
+ if sp in utils.NodeEnum.__members__:
+ return utils.NodeEnum[sp]
+ else:
+ errormsg = "Invalid choice {0} for storage processor".format(
+ sp)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_tiering_policy_enum(self, tiering_policy):
+ """Get the tiering_policy enum.
+ :param tiering_policy: The tiering_policy string
+ :return: tiering_policy enum
+ """
+
+ if tiering_policy in utils.TieringPolicyEnum.__members__:
+ return utils.TieringPolicyEnum[tiering_policy]
+ else:
+ errormsg = "Invalid choice {0} for tiering policy".format(
+ tiering_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def create_volume(self, obj_pool, size, host_access=None):
        """Create a volume.
        :param obj_pool: pool object instance
        :param size: size of the volume in GB
        :param host_access: host to be associated with this volume
        :return: Volume object on successful creation
        """

        vol_name = self.module.params['vol_name']

        try:

            description = self.module.params['description']
            compression = self.module.params['compression']
            advanced_dedup = self.module.params['advanced_dedup']
            is_thin = self.module.params['is_thin']
            snap_schedule = None

            # Optional placement/policy settings are resolved only when
            # the corresponding parameter was supplied.
            sp = self.module.params['sp']
            sp = self.get_node_enum(sp) if sp else None

            # param_io_limit_pol_id was resolved from the policy name in
            # perform_module_operation().
            io_limit_policy = self.get_io_limit_policy(
                id=self.param_io_limit_pol_id) \
                if self.module.params['io_limit_policy'] else None

            # The create API expects the schedule as {'name': <name>}.
            if self.param_snap_schedule_name:
                snap_schedule = {"name": self.param_snap_schedule_name}

            tiering_policy = self.module.params['tiering_policy']
            tiering_policy = self.get_tiering_policy_enum(tiering_policy) \
                if tiering_policy else None

            obj_vol = obj_pool.create_lun(lun_name=vol_name,
                                          size_gb=size,
                                          sp=sp,
                                          host_access=host_access,
                                          is_thin=is_thin,
                                          description=description,
                                          tiering_policy=tiering_policy,
                                          snap_schedule=snap_schedule,
                                          io_limit_policy=io_limit_policy,
                                          is_compression=compression,
                                          is_advanced_dedup_enabled=advanced_dedup)

            LOG.info("Successfully created volume , %s", obj_vol)

            return obj_vol

        except Exception as e:
            errormsg = "Create volume operation {0} failed" \
                       " with error {1}".format(vol_name, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
    def host_access_modify_required(self, host_access_list):
        """Check if host access modification is required
        :param host_access_list: host access dict list
        :return: True when the mapping must change, False otherwise.
            (Returns a boolean, not a dict of attributes.)
        """

        try:
            to_modify = False
            mapping_state = self.module.params['mapping_state']

            host_id_list = []
            hlu_list = []
            new_list = []
            # Nothing currently mapped and an unmap was requested: there is
            # nothing to do.
            if not host_access_list and self.new_host_list and\
                    mapping_state == 'unmapped':
                return to_modify

            elif host_access_list:
                # Collect the ids (and hlus) of the hosts currently mapped.
                for host_access in host_access_list.host:
                    host_id_list.append(host_access.id)
                    # update() refreshes the host so host_luns is current.
                    host = self.get_host(host_id=host_access.id).update()
                    host_dict = host.host_luns._get_properties()
                    LOG.debug("check if hlu present : %s", host_dict)

                    if "hlu" in host_dict.keys():
                        hlu_list.append(host_dict['hlu'])

            if mapping_state == 'mapped':
                # NOTE(review): self.param_host_id is never assigned in this
                # module, so it appears to always be None here -- confirm.
                if (self.param_host_id not in host_id_list):
                    for item in self.new_host_list:
                        new_list.append(item.get("host_id"))
                    # Only modify when at least one requested host is not
                    # already mapped.
                    if not list(set(new_list) - set(host_id_list)):
                        return False
                    to_modify = True

            if mapping_state == 'unmapped':
                if self.new_host_list:
                    for item in self.new_host_list:
                        new_list.append(item.get("host_id"))
                    # If any requested host is not mapped at all, there is
                    # nothing to unmap.
                    if list(set(new_list) - set(host_id_list)):
                        return False
                    # Hosts that should remain mapped after the detach.
                    self.overlapping_list = list(set(host_id_list) - set(new_list))
                    to_modify = True
            LOG.debug("host_access_modify_required : %s ", str(to_modify))
            return to_modify

        except Exception as e:
            errormsg = "Failed to compare the host_access with error {0} " \
                       "{1}".format(host_access_list, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
+ def volume_modify_required(self, obj_vol, cap_unit):
+ """Check if volume modification is required
+ :param obj_vol: volume instance
+ :param cap_unit: capacity unit
+ :return: Boolean value to indicate if modification is required
+ """
+
+ try:
+ to_update = {}
+
+ new_vol_name = self.module.params['new_vol_name']
+ if new_vol_name and obj_vol.name != new_vol_name:
+ to_update.update({'name': new_vol_name})
+
+ description = self.module.params['description']
+ if description and obj_vol.description != description:
+ to_update.update({'description': description})
+
+ size = self.module.params['size']
+ if size and cap_unit:
+ size_byte = int(utils.get_size_bytes(size, cap_unit))
+ if size_byte < obj_vol.size_total:
+ self.module.fail_json(msg="Volume size can be "
+ "expanded only")
+ elif size_byte > obj_vol.size_total:
+ to_update.update({'size': size_byte})
+
+ compression = self.module.params['compression']
+ if compression is not None and \
+ compression != obj_vol.is_data_reduction_enabled:
+ to_update.update({'is_compression': compression})
+
+ advanced_dedup = self.module.params['advanced_dedup']
+ if advanced_dedup is not None and \
+ advanced_dedup != obj_vol.is_advanced_dedup_enabled:
+ to_update.update({'is_advanced_dedup_enabled': advanced_dedup})
+
+ is_thin = self.module.params['is_thin']
+ if is_thin is not None and is_thin != obj_vol.is_thin_enabled:
+ self.module.fail_json(msg="Modifying is_thin is not allowed")
+
+ sp = self.module.params['sp']
+ if sp and self.get_node_enum(sp) != obj_vol.current_node:
+ to_update.update({'sp': self.get_node_enum(sp)})
+
+ tiering_policy = self.module.params['tiering_policy']
+ if tiering_policy and self.get_tiering_policy_enum(
+ tiering_policy) != obj_vol.tiering_policy:
+ to_update.update({'tiering_policy':
+ self.get_tiering_policy_enum(
+ tiering_policy)})
+
+ # prepare io_limit_policy object
+ if self.param_io_limit_pol_id:
+ if (not obj_vol.io_limit_policy) \
+ or (self.param_io_limit_pol_id
+ != obj_vol.io_limit_policy.id):
+ to_update.update(
+ {'io_limit_policy': self.param_io_limit_pol_id})
+
+ # prepare snap_schedule object
+ if self.param_snap_schedule_name:
+ if (not obj_vol.snap_schedule) \
+ or (self.param_snap_schedule_name
+ != obj_vol.snap_schedule.name):
+ to_update.update({'snap_schedule':
+ self.param_snap_schedule_name})
+
+ # for removing existing snap_schedule
+ if self.param_snap_schedule_name == "":
+ if obj_vol.snap_schedule:
+ to_update.update({'is_snap_schedule_paused': False})
+ else:
+ LOG.warn("No snapshot schedule is associated")
+
+ LOG.debug("Volume to modify Dict : %s", to_update)
+ if len(to_update) > 0:
+ return to_update
+ else:
+ return None
+
+ except Exception as e:
+ errormsg = "Failed to determine if volume {0},requires " \
+ "modification, with error {1}".format(obj_vol.name,
+ str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def multiple_host_map(self, host_dic_list, obj_vol):
+ """Attach multiple hosts to a volume
+ :param host_dic_list: hosts to map the volume
+ :param obj_vol: volume instance
+ :return: response from API call
+ """
+
+ try:
+ host_access = []
+ current_hosts = self.get_volume_host_access_list(obj_vol)
+ for existing_host in current_hosts:
+ host_access.append(
+ {'accessMask': eval('utils.HostLUNAccessEnum.' + existing_host['accessMask']),
+ 'host':
+ {'id': existing_host['id']}, 'hlu': existing_host['hlu']})
+ for item in host_dic_list:
+ host_access.append(
+ {'accessMask': utils.HostLUNAccessEnum.PRODUCTION,
+ 'host':
+ {'id': item['host_id']}, 'hlu': item['hlu']})
+ resp = obj_vol.modify(host_access=host_access)
+ return resp
+ except Exception as e:
+ errormsg = "Failed to attach hosts {0} with volume {1} with error {2} ".format(host_dic_list, obj_vol.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def multiple_detach(self, host_list_detach, obj_vol):
+ """Detach multiple hosts from a volume
+ :param host_list_detach: hosts to unmap the volume
+ :param obj_vol: volume instance
+ :return: response from API call
+ """
+
+ try:
+ host_access = []
+ for item in host_list_detach:
+ host_access.append({'accessMask': utils.HostLUNAccessEnum.PRODUCTION,
+ 'host': {'id': item}})
+ resp = obj_vol.modify(host_access=host_access)
+ return resp
+ except Exception as e:
+ errormsg = "Failed to detach hosts {0} from volume {1} with error {2} ".format(host_list_detach, obj_vol.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def modify_volume(self, obj_vol, to_modify_dict):
+ """modify volume attributes
+ :param obj_vol: volume instance
+ :param to_modify_dict: dict containing attributes to be modified.
+ :return: None
+ """
+
+ try:
+
+ if 'io_limit_policy' in to_modify_dict.keys():
+ to_modify_dict['io_limit_policy'] = self.get_io_limit_policy(
+ id=to_modify_dict['io_limit_policy'])
+
+ if 'snap_schedule' in to_modify_dict.keys() and \
+ to_modify_dict['snap_schedule'] != "":
+ to_modify_dict['snap_schedule'] = \
+ {"name": to_modify_dict['snap_schedule']}
+
+ param_list = ['name', 'size', 'host_access', 'description', 'sp',
+ 'io_limit_policy', 'tiering_policy',
+ 'snap_schedule', 'is_snap_schedule_paused',
+ 'is_compression', 'is_advanced_dedup_enabled']
+
+ for item in param_list:
+ if item not in to_modify_dict.keys():
+ to_modify_dict.update({item: None})
+
+ LOG.debug("Final update dict before modify "
+ "api call: %s", to_modify_dict)
+
+ obj_vol.modify(name=to_modify_dict['name'],
+ size=to_modify_dict['size'],
+ host_access=to_modify_dict['host_access'],
+ description=to_modify_dict['description'],
+ sp=to_modify_dict['sp'],
+ io_limit_policy=to_modify_dict['io_limit_policy'],
+ tiering_policy=to_modify_dict['tiering_policy'],
+ snap_schedule=to_modify_dict['snap_schedule'],
+ is_snap_schedule_paused=to_modify_dict['is_snap_schedule_paused'],
+ is_compression=to_modify_dict['is_compression'],
+ is_advanced_dedup_enabled=to_modify_dict['is_advanced_dedup_enabled'])
+
+ except Exception as e:
+ errormsg = "Failed to modify the volume {0} " \
+ "with error {1}".format(obj_vol.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def delete_volume(self, vol_id):
+ """Delete volume.
+ :param vol_obj: The object instance of the volume to be deleted
+ """
+
+ try:
+ obj_vol = self.get_volume(vol_id=vol_id)
+ obj_vol.delete(force_snap_delete=False)
+ return True
+
+ except Exception as e:
+ errormsg = "Delete operation of volume id:{0} " \
+ "failed with error {1}".format(id,
+ str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_volume_host_access_list(self, obj_vol):
+ """
+ Get volume host access list
+ :param obj_vol: volume instance
+ :return: host list
+ """
+ host_list = []
+ if obj_vol.host_access:
+ for host_access in obj_vol.host_access:
+ host = self.get_host(host_id=host_access.host.id).update()
+ hlu = None
+ for host_lun in host.host_luns:
+ if host_lun.lun.name == obj_vol.name:
+ hlu = host_lun.hlu
+ host_list.append({'name': host_access.host.name,
+ 'id': host_access.host.id,
+ 'accessMask': host_access.access_mask.name,
+ 'hlu': hlu})
+ return host_list
+
+ def get_volume_display_attributes(self, obj_vol):
+ """get display volume attributes
+ :param obj_vol: volume instance
+ :return: volume dict to display
+ """
+ try:
+ obj_vol = obj_vol.update()
+ volume_details = obj_vol._get_properties()
+ volume_details['size_total_with_unit'] = utils. \
+ convert_size_with_unit(int(volume_details['size_total']))
+ volume_details.update({'host_access': self.get_volume_host_access_list(obj_vol)})
+ if obj_vol.snap_schedule:
+ volume_details.update(
+ {'snap_schedule': {'name': obj_vol.snap_schedule.name,
+ 'id': obj_vol.snap_schedule.id}})
+ if obj_vol.io_limit_policy:
+ volume_details.update(
+ {'io_limit_policy': {'name': obj_vol.io_limit_policy.id,
+ 'id': obj_vol.io_limit_policy.id}})
+ if obj_vol.pool:
+ volume_details.update({'pool': {'name': obj_vol.pool.name,
+ 'id': obj_vol.pool.id}})
+
+ return volume_details
+
+ except Exception as e:
+ errormsg = "Failed to display the volume {0} with " \
+ "error {1}".format(obj_vol.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_input_string(self):
+ """ validates the input string checks if it is empty string
+
+ """
+ invalid_string = ""
+ try:
+ no_chk_list = ['snap_schedule', 'description']
+ for key in self.module.params:
+ val = self.module.params[key]
+ if key not in no_chk_list and isinstance(val, str) \
+ and val == invalid_string:
+ errmsg = 'Invalid input parameter "" for {0}'.format(
+ key)
+ self.module.fail_json(msg=errmsg)
+
+ except Exception as e:
+ errormsg = "Failed to validate the module param with " \
+ "error {0}".format(str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
    def validate_host_list(self, host_list_input):
        """ validates the host_list_input value for None and empty

        Fails the task when an entry supplies both host_name and host_id,
        or when an entry supplies neither.
        """
        try:
            for host_list in host_list_input:
                # Per entry, name and id are mutually exclusive.
                if ("host_name" in host_list.keys() and "host_id" in host_list.keys()):
                    if host_list["host_name"] and host_list["host_id"]:
                        errmsg = 'parameters are mutually exclusive: host_name|host_id'
                        self.module.fail_json(msg=errmsg)
                # At least one non-empty identifier must be present.
                is_host_details_missing = True
                for key, value in host_list.items():
                    if key == "host_name" and not is_none_or_empty_string(value):
                        is_host_details_missing = False
                    elif key == "host_id" and not is_none_or_empty_string(value):
                        is_host_details_missing = False

                if is_host_details_missing:
                    # NOTE(review): 'key' is whatever key the loop above saw
                    # last (unbound for an empty dict, which the outer
                    # except then converts into the generic failure) -- the
                    # message may name an arbitrary field.
                    errmsg = 'Invalid input parameter for {0}'.format(key)
                    self.module.fail_json(msg=errmsg)

        except Exception as e:
            errormsg = "Failed to validate the module param with " \
                       "error {0}".format(str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
+
+ def resolve_host_mappings(self, hosts):
+ """ This method creates a dictionary of hosts and hlu parameter values
+ :param hosts: host and hlu value passed from input file
+ :return: list of host and hlu dictionary
+ """
+ host_list_new = []
+
+ if hosts:
+ for item in hosts:
+ host_dict = dict()
+ host_id = None
+ hlu = None
+ if item['host_name']:
+ host = self.get_host(host_name=item['host_name'])
+ if host:
+ host_id = host.id
+ if item['host_id']:
+ host_id = item['host_id']
+ if item['hlu']:
+ hlu = item['hlu']
+ host_dict['host_id'] = host_id
+ host_dict['hlu'] = hlu
+ host_list_new.append(host_dict)
+ return host_list_new
+
    def perform_module_operation(self):
        """
        Perform different actions on volume module based on parameters
        passed in the playbook
        """
        self.new_host_list = []
        self.overlapping_list = []
        vol_name = self.module.params['vol_name']
        vol_id = self.module.params['vol_id']
        pool_name = self.module.params['pool_name']
        pool_id = self.module.params['pool_id']
        size = self.module.params['size']
        cap_unit = self.module.params['cap_unit']
        snap_schedule = self.module.params['snap_schedule']
        io_limit_policy = self.module.params['io_limit_policy']
        host_name = self.module.params['host_name']
        host_id = self.module.params['host_id']
        hlu = self.module.params['hlu']
        mapping_state = self.module.params['mapping_state']
        new_vol_name = self.module.params['new_vol_name']
        state = self.module.params['state']
        hosts = self.module.params['hosts']

        # result is a dictionary to contain end state and volume details
        changed = False
        result = dict(
            changed=False,
            volume_details={}
        )

        to_modify_dict = None
        volume_details = None
        to_modify_host = False

        # ---- parameter validation ----
        self.validate_input_string()

        if hosts:
            self.validate_host_list(hosts)

        if size is not None and size == 0:
            self.module.fail_json(msg="Size can not be 0 (Zero)")

        # Size defaults to GB when no unit is supplied.
        if size and not cap_unit:
            cap_unit = 'GB'

        if (cap_unit is not None) and not size:
            self.module.fail_json(msg="cap_unit can be specified along "
                                      "with size")

        # hlu / mapping_state only make sense together with a host option.
        if hlu and (not host_name and not host_id and not hosts):
            self.module.fail_json(msg="hlu can be specified with "
                                      "host_id or host_name")
        if mapping_state and (not host_name and not host_id and not hosts):
            self.module.fail_json(msg="mapping_state can be specified"
                                      " with host_id or host_name or hosts")

        obj_vol = self.get_volume(vol_id=vol_id, vol_name=vol_name)

        # ---- resolve requested host mappings into self.new_host_list ----
        if host_name or host_id:
            if not mapping_state:
                errmsg = "'mapping_state' is required along with " \
                         "'host_name' or 'host_id' or 'hosts'"
                self.module.fail_json(msg=errmsg)
            host = [{'host_name': host_name, 'host_id': host_id, 'hlu': hlu}]
            self.new_host_list = self.resolve_host_mappings(host)

        if hosts:
            if not mapping_state:
                errmsg = "'mapping_state' is required along with " \
                         "'host_name' or 'host_id' or 'hosts'"
                self.module.fail_json(msg=errmsg)
            self.new_host_list += self.resolve_host_mappings(hosts)

        # Resolve the io limit policy name to its id once, up front.
        if io_limit_policy:
            io_limit_policy = self.get_io_limit_policy(name=io_limit_policy)
            self.param_io_limit_pol_id = io_limit_policy.id

        if snap_schedule:
            # get_snap_schedule() returns a list object, so .name is a list
            # of names -- hence the [0].
            snap_schedule = self.get_snap_schedule(name=snap_schedule)
            self.param_snap_schedule_name = snap_schedule.name[0]

        # this is for removing existing snap_schedule
        if snap_schedule == "":
            self.param_snap_schedule_name = snap_schedule

        # ---- work out what (if anything) must change ----
        if obj_vol:
            volume_details = obj_vol._get_properties()
            vol_id = obj_vol.get_id()
            to_modify_dict = self.volume_modify_required(obj_vol, cap_unit)
            LOG.debug("Volume Modify Required: %s", to_modify_dict)
            if obj_vol.host_access:
                to_modify_host = self.host_access_modify_required(
                    host_access_list=obj_vol.host_access)
                LOG.debug("Host Modify Required in access: %s", to_modify_host)
            elif self.new_host_list:
                to_modify_host = self.host_access_modify_required(
                    host_access_list=obj_vol.host_access)
                LOG.debug("Host Modify Required: %s", to_modify_host)

        # ---- create a new volume ----
        if state == 'present' and not volume_details:
            if not vol_name:
                msg_noname = "volume with id {0} is not found, unable to " \
                             "create a volume without a valid " \
                             "vol_name".format(vol_id)
                self.module.fail_json(msg=msg_noname)

            if snap_schedule == "":
                self.module.fail_json(msg="Invalid snap_schedule")

            if new_vol_name:
                self.module.fail_json(msg="new_vol_name is not required "
                                          "to create a new volume")
            if not pool_name and not pool_id:
                self.module.fail_json(msg="pool_id or pool_name is required "
                                          "to create new volume")
            if not size:
                self.module.fail_json(msg="Size is required to create"
                                          " a volume")
            # Build the initial host_access payload for create_lun().
            host_access = None
            if self.new_host_list:
                host_access = []
                for item in self.new_host_list:
                    if item['hlu']:
                        host_access.append(
                            {'accessMask': utils.HostLUNAccessEnum.PRODUCTION, 'host': {'id': item['host_id']},
                             'hlu': item['hlu']})
                    else:
                        host_access.append(
                            {'accessMask': utils.HostLUNAccessEnum.PRODUCTION, 'host': {'id': item['host_id']}})

            size = utils.get_size_in_gb(size, cap_unit)

            obj_pool = self.get_pool(pool_name=pool_name, pool_id=pool_id)

            obj_vol = self.create_volume(obj_pool=obj_pool, size=size,
                                         host_access=host_access)
            if obj_vol:
                LOG.debug("Successfully created volume , %s", obj_vol)
                vol_id = obj_vol.id
                volume_details = obj_vol._get_properties()
                LOG.debug("Got volume id , %s", vol_id)
                changed = True

        # ---- modify an existing volume ----
        if state == 'present' and volume_details and to_modify_dict:
            self.modify_volume(obj_vol=obj_vol, to_modify_dict=to_modify_dict)
            changed = True

        # ---- map requested hosts ----
        if (state == 'present' and volume_details
                and mapping_state == 'mapped' and to_modify_host):
            if self.new_host_list:
                resp = self.multiple_host_map(host_dic_list=self.new_host_list, obj_vol=obj_vol)
                changed = True if resp else False

        # ---- unmap requested hosts (keep only the overlapping ones) ----
        if (state == 'present' and volume_details
                and mapping_state == 'unmapped' and to_modify_host):
            if self.new_host_list:
                resp = self.multiple_detach(host_list_detach=self.overlapping_list, obj_vol=obj_vol)
                LOG.info(resp)
                changed = True if resp else False

        # ---- delete ----
        if state == 'absent' and volume_details:
            changed = self.delete_volume(vol_id)
            volume_details = None

        # ---- gather final facts for the module result ----
        if state == 'present' and volume_details:
            volume_details = self.get_volume_display_attributes(
                obj_vol=obj_vol)

        result['changed'] = changed
        result['volume_details'] = volume_details
        self.module.exit_json(**result)
+
+
def get_volume_parameters():
    """This method provide parameters required for the ansible volume
    module on Unity

    :return: argument_spec dict consumed by AnsibleModule
    """
    return dict(
        vol_name=dict(required=False, type='str'),
        vol_id=dict(required=False, type='str'),
        description=dict(required=False, type='str'),
        pool_name=dict(required=False, type='str'),
        pool_id=dict(required=False, type='str'),
        size=dict(required=False, type='int'),
        cap_unit=dict(required=False, type='str', choices=['GB', 'TB']),
        is_thin=dict(required=False, type='bool'),
        compression=dict(required=False, type='bool'),
        advanced_dedup=dict(required=False, type='bool'),
        sp=dict(required=False, type='str', choices=['SPA', 'SPB']),
        io_limit_policy=dict(required=False, type='str'),
        snap_schedule=dict(required=False, type='str'),
        host_name=dict(required=False, type='str'),
        host_id=dict(required=False, type='str'),
        hosts=dict(required=False, type='list', elements='dict',
                   options=dict(
                       host_id=dict(required=False, type='str'),
                       host_name=dict(required=False, type='str'),
                       # 'int' for consistency with the top-level 'hlu'
                       # option (was 'str'); Ansible still coerces quoted
                       # numbers supplied in playbooks.
                       hlu=dict(required=False, type='int')
                   )),
        hlu=dict(required=False, type='int'),
        mapping_state=dict(required=False, type='str',
                           choices=['mapped', 'unmapped']),
        new_vol_name=dict(required=False, type='str'),
        tiering_policy=dict(required=False, type='str', choices=[
            'AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']),
        state=dict(required=True, type='str', choices=['present', 'absent'])
    )
+
+
def main():
    """ Create Unity volume object and perform action on it
    based on user input from playbook"""
    # Instantiate the module wrapper and run the requested operation.
    Volume().perform_module_operation()


if __name__ == '__main__':
    main()