Diffstat (limited to 'ansible_collections/hpe/nimble/plugins')
-rw-r--r--  ansible_collections/hpe/nimble/plugins/doc_fragments/hpe_nimble.py  50
-rw-r--r--  ansible_collections/hpe/nimble/plugins/module_utils/hpe_nimble.py  332
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_access_control_record.py  270
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_array.py  468
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_chap_user.py  274
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_disk.py  200
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_encryption.py  372
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_fc.py  324
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_group.py  1257
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_info.py  1026
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_initiator_group.py  357
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_network.py  427
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_partner.py  511
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_performance_policy.py  343
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_pool.py  352
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_protection_schedule.py  521
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_protection_template.py  386
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_shelf.py  228
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_snapshot.py  360
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_snapshot_collection.py  403
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_user.py  381
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_user_policy.py  241
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_volume.py  843
-rw-r--r--  ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_volume_collection.py  717
24 files changed, 10643 insertions, 0 deletions
diff --git a/ansible_collections/hpe/nimble/plugins/doc_fragments/hpe_nimble.py b/ansible_collections/hpe/nimble/plugins/doc_fragments/hpe_nimble.py
new file mode 100644
index 00000000..d2a31e0d
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/doc_fragments/hpe_nimble.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2021 Hewlett Packard Enterprise Development LP
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # HPE Nimble doc fragment
+ DOCUMENTATION = '''
+options:
+ host:
+ description:
+ - HPE Nimble Storage IP address.
+ required: True
+ type: str
+ password:
+ description:
+ - HPE Nimble Storage password.
+ required: True
+ type: str
+ username:
+ description:
+ - HPE Nimble Storage user name.
+ required: True
+ type: str
+requirements:
+ - Ansible 2.9 or later
+ - Python 3.6 or later
+ - HPE Nimble Storage SDK for Python
+ - HPE Nimble Storage arrays running NimbleOS 5.0 or later
+
+'''
diff --git a/ansible_collections/hpe/nimble/plugins/module_utils/hpe_nimble.py b/ansible_collections/hpe/nimble/plugins/module_utils/hpe_nimble.py
new file mode 100644
index 00000000..02c673e4
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/module_utils/hpe_nimble.py
@@ -0,0 +1,332 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import datetime
+import uuid
+
+__version__ = "1.1.0"
+
+
+def is_null_or_empty(name):
+ if type(name) is bool:
+ return False
+ if not name or name == "":
+ return True
+ return False
+
+
+def get_unique_string(base_name):
+    unique_string = base_name + datetime.datetime.now().strftime(
+        "-%d-%m-%Y") + '-' + str(uuid.uuid1().time)
+    # truncate to 63 characters; the Nimble array allows names of at most 64 characters
+    unique_string = unique_string[:63]
+ return unique_string
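+
+# Illustrative only: for a base name "vol1" this yields something like
+# "vol1-21-06-2021-136572319068220" (base name, date stamp, uuid1 time field),
+# truncated to 63 characters.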
+
+
+# remove arguments from kwargs which are by default none or empty
+def remove_null_args(**kwargs):
+ tosearch = kwargs.copy()
+ for key, value in tosearch.items():
+        # a list can be empty in the case of an update, so that argument must not be removed
+ if type(value) is not bool and type(value) is not list:
+ if is_null_or_empty(value):
+ kwargs.pop(key)
+ return kwargs
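+
+# A minimal sketch of the filtering behavior (argument names are illustrative):
+#   remove_null_args(description=None, force=False, iqns=[], name="v1")
+#   -> {'force': False, 'iqns': [], 'name': 'v1'}
+# booleans and lists survive even when falsy; None and empty strings are dropped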
+
+
+def is_dict_item_present_on_server(server_list_of_dict, dict_to_check):
+
+ if dict_to_check is None and server_list_of_dict is None:
+ return True
+    if dict_to_check is None or len(dict_to_check) == 0:
+ return False
+ if type(server_list_of_dict) is not list:
+ return False
+
+ for server_dict in server_list_of_dict:
+        if dict_to_check.items() <= server_dict.items():
+ return True
+ return False
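+
+# Example (values are illustrative); the check is a subset match of the given
+# dictionary against each server-side dictionary:
+#   is_dict_item_present_on_server([{'key': 'env', 'value': 'prod', 'id': '1'}],
+#                                  {'key': 'env', 'value': 'prod'})   # -> True
+#   is_dict_item_present_on_server([{'key': 'env', 'value': 'prod'}],
+#                                  {'key': 'env', 'value': 'dev'})    # -> False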
+
+
+# remove unchanged item from kwargs by matching them with the data present in given object attrs
+def remove_unchanged_or_null_args(server_resp, **kwargs):
+ # Filter out null/empty arguments from the input
+ params = remove_null_args(**kwargs)
+ # check if server resp has attribute called attrs
+ if hasattr(server_resp, "attrs") is False or type(server_resp.attrs) is not dict:
+ return (params, params)
+
+ params_to_search = params.copy()
+ changed_attrs_dict = {}
+
+ for key, value in params_to_search.items():
+        # the user may have provided a "key" name that is not present at all in the
+        # server response. In that case get() returns None and the key is treated as changed.
+ server_value = server_resp.attrs.get(key)
+
+ if type(server_value) is list and type(value) is dict:
+ if len(value) == 0:
+ continue
+            # we land here when the user wants to update metadata;
+            # the server returns metadata as a list of dictionaries
+ temp_server_metadata_dict = {}
+ for server_entry in server_value:
+ temp_server_metadata_dict[server_entry['key']] = server_entry['value']
+ if (value.items() <= temp_server_metadata_dict.items()) is False:
+ changed_attrs_dict[key] = value
+ else:
+ params.pop(key)
+
+ elif type(server_value) is dict and type(value) is dict:
+ if len(value) == 0:
+ continue
+ if (value.items() <= server_value.items()) is False:
+ changed_attrs_dict[key] = value
+ else:
+ params.pop(key)
+
+ elif type(server_value) is list and type(value) is list:
+ found_changed_list = False
+ if len(value) != len(server_value):
+ changed_attrs_dict[key] = value
+ continue
+ # check if the list has dictionary to compare
+ for entry_to_check in value:
+ if type(entry_to_check) is dict:
+ if is_dict_item_present_on_server(server_value, entry_to_check) is True:
+ continue
+                    # no need to check the remaining keys; we already found one mismatch
+ changed_attrs_dict[key] = value
+ found_changed_list = True
+ else:
+                    if sorted(server_value) != sorted(value):
+ changed_attrs_dict[key] = value
+ found_changed_list = True
+ break
+ if found_changed_list is False:
+ params.pop(key)
+
+ elif server_value is None and type(value) is list:
+            # special case: the user provided an empty list and the server already
+            # has a null value for that attribute; in that case the argument must
+            # not be added to changed_attrs_dict
+ if len(value) == 0:
+ # don't add empty list for update
+ continue
+ changed_attrs_dict[key] = value
+ elif server_value != value:
+            # 'force' is a special key used to force an operation on an object;
+            # it is never a server attribute, so it is excluded from the comparison
+ if key != "force":
+ changed_attrs_dict[key] = value
+ else:
+                # the value is unchanged and already present on the server, so drop this param from the dictionary
+ params.pop(key)
+ return (changed_attrs_dict, params)
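+
+# A minimal sketch (server object and values are hypothetical): if the server
+# reports attrs {'name': 'vol1', 'limit_iops': 1000}, then
+#   remove_unchanged_or_null_args(server_resp, name='vol1', limit_iops=2000)
+# returns ({'limit_iops': 2000}, {'limit_iops': 2000}) - the first dict holds
+# only the attributes that differ on the server, the second the update payload.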
+
+
+def basic_auth_arg_fields():
+
+ fields = {
+ "host": {
+ "required": True,
+ "type": "str"
+ },
+ "username": {
+ "required": True,
+ "type": "str"
+ },
+ "password": {
+ "required": True,
+ "type": "str",
+ "no_log": True
+ }
+ }
+ return fields
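+
+# Typical use, mirroring the modules below: merge these common auth fields into
+# a module-specific argument spec before constructing the AnsibleModule, e.g.
+#   fields = {"state": {"required": True, "type": "str"}}
+#   fields.update(basic_auth_arg_fields())
+#   module = AnsibleModule(argument_spec=fields)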
+
+
+def get_vol_id(client_obj, vol_name):
+ if is_null_or_empty(vol_name):
+ return None
+ else:
+ resp = client_obj.volumes.get(name=vol_name)
+ if resp is None:
+ raise Exception(f"Invalid value for volume {vol_name}")
+ return resp.attrs.get("id")
+
+
+def get_volcoll_id(client_obj, volcoll_name):
+ if is_null_or_empty(volcoll_name):
+ return None
+ else:
+ resp = client_obj.volume_collections.get(name=volcoll_name)
+ if resp is None:
+ raise Exception(f"Invalid value for volcoll {volcoll_name}")
+ return resp.attrs.get("id")
+
+
+def get_owned_by_group_id(client_obj, owned_by_group_name):
+ if is_null_or_empty(owned_by_group_name):
+ return None
+ else:
+ resp = client_obj.groups.get(name=owned_by_group_name)
+ if resp is None:
+ raise Exception(f"Invalid value for owned by group {owned_by_group_name}")
+ return resp.attrs.get("id")
+
+
+def get_pool_id(client_obj, pool_name):
+ if is_null_or_empty(pool_name):
+ return None
+ else:
+ resp = client_obj.pools.get(name=pool_name)
+ if resp is None:
+ raise Exception(f"Invalid value for pool {pool_name}")
+ return resp.attrs.get("id")
+
+
+def get_folder_id(client_obj, folder_name):
+ if is_null_or_empty(folder_name):
+ return None
+ else:
+ resp = client_obj.folders.get(name=folder_name)
+ if resp is None:
+ raise Exception(f"Invalid value for folder {folder_name}")
+ return resp.attrs.get("id")
+
+
+def get_perfpolicy_id(client_obj, perfpolicy_name):
+ if is_null_or_empty(perfpolicy_name):
+ return None
+ else:
+ resp = client_obj.performance_policies.get(name=perfpolicy_name)
+ if resp is None:
+ raise Exception(f"Invalid value for performance policy: {perfpolicy_name}")
+ return resp.attrs.get("id")
+
+
+def get_prottmpl_id(client_obj, prottmpl_name):
+ if is_null_or_empty(prottmpl_name):
+ return None
+ else:
+ resp = client_obj.protection_templates.get(name=prottmpl_name)
+ if resp is None:
+ raise Exception(f"Invalid value for protection template {prottmpl_name}")
+ return resp.attrs.get("id")
+
+
+def get_chap_user_id(client_obj, chap_user_name):
+ if is_null_or_empty(chap_user_name):
+ return None
+ else:
+ resp = client_obj.chap_users.get(name=chap_user_name)
+ if resp is None:
+ raise Exception(f"Invalid value for chap user {chap_user_name}")
+ return resp.attrs.get("id")
+
+
+def get_pe_id(client_obj, pe_name):
+ if is_null_or_empty(pe_name):
+ return None
+ else:
+ resp = client_obj.protocol_endpoints.get(name=pe_name)
+ if resp is None:
+ raise Exception(f"Invalid value for protection endpoint {pe_name}")
+ return resp.attrs.get("id")
+
+
+def get_snapshot_id(client_obj, vol_name, snap_name):
+ if is_null_or_empty(vol_name) or is_null_or_empty(snap_name):
+ return None
+ else:
+ resp = client_obj.snapshots.get(vol_name=vol_name, name=snap_name)
+ if resp is None:
+ raise Exception(f"No snapshot with name '{snap_name}' found for volume {vol_name}.")
+ return resp.attrs.get("id")
+
+
+def get_replication_partner_id(client_obj, replication_partner_name):
+ if is_null_or_empty(replication_partner_name):
+ return None
+ else:
+ resp = client_obj.replication_partners.get(name=replication_partner_name)
+ if resp is None:
+ raise Exception(f"Invalid value for replication partner {replication_partner_name}")
+ return resp.attrs.get("id")
+
+
+def get_volcoll_or_prottmpl_id(client_obj, volcoll_name, prot_template_name):
+ if is_null_or_empty(volcoll_name) and is_null_or_empty(prot_template_name):
+ return None
+ if is_null_or_empty(volcoll_name) is False and is_null_or_empty(prot_template_name) is False:
+ raise Exception("Volcoll and prot_template are mutually exlusive. Please provide either one of them.")
+ else:
+ if volcoll_name is not None:
+ resp = get_volcoll_id(client_obj, volcoll_name)
+ if resp is None:
+ raise Exception(f"Invalid value for volcoll: {volcoll_name}")
+ elif prot_template_name is not None:
+ resp = get_prottmpl_id(client_obj, prot_template_name)
+ if resp is None:
+ raise Exception(f"Invalid value for protection template {prot_template_name}")
+ return resp
+
+
+def get_downstream_partner_id(client_obj, downstream_partner):
+ if is_null_or_empty(downstream_partner):
+ return None
+ else:
+ resp = client_obj.replication_partners.get(name=downstream_partner)
+ if resp is None:
+ raise Exception(f"Invalid value for downstream partner {downstream_partner}")
+ return resp.attrs.get("id")
+
+
+def get_initiator_group_id(client_obj, ig_name):
+ if is_null_or_empty(ig_name):
+ return None
+ else:
+ resp = client_obj.initiator_groups.get(name=ig_name)
+ if resp is None:
+ raise Exception(f"Invalid value for initiator group {ig_name}")
+ return resp.attrs.get("id")
+
+
+def is_array_version_above_or_equal(array_obj_client, arr_version_to_check):
+ if arr_version_to_check is None:
+ return False
+ resp = array_obj_client.get()
+ if resp is None:
+ return False
+ array_version = resp.attrs.get("version")
+ if array_version is None:
+ return False
+
+    temp = array_version.split('-')
+    array_version = temp[0]
+    # compare the dotted versions numerically; a plain string comparison would
+    # order "10" before "9" and mis-handle cases like 5.1.0 vs 5.1.5
+    arr_version = [int(x) for x in array_version.split('.')]
+    version_to_check = [int(x) for x in arr_version_to_check.split('.')]
+    return tuple(arr_version) >= tuple(version_to_check)
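+
+# Illustrative check: with an array reporting version "5.2.1.0-123456",
+# is_array_version_above_or_equal(client_obj.arrays, "5.1.0") strips the build
+# suffix and compares (5, 2, 1, 0) >= (5, 1, 0) numerically.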
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_access_control_record.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_access_control_record.py
new file mode 100644
index 00000000..4ad3118d
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_access_control_record.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the access control records on an HPE Nimble Storage group.
+module: hpe_nimble_access_control_record
+options:
+ apply_to:
+ required: False
+ choices:
+ - volume
+ - snapshot
+ - both
+ type: str
+ description:
+ - The type of object to which this access control record applies.
+ chap_user:
+ required: False
+ type: str
+ description:
+ - Name for the CHAP user.
+ initiator_group:
+ required: True
+ type: str
+ description:
+ - The initiator group name.
+ lun:
+ required: False
+ type: int
+ description:
+ - If this access control record applies to a regular volume, this attribute is the volume's LUN (Logical Unit Number).
+ - If the access protocol is iSCSI, the LUN will be 0. However, if the access protocol is Fibre Channel, the LUN will be in the range from 0 to 2047.
+ state:
+ required: True
+ choices:
+ - present
+ - absent
+ - create
+ type: str
+ description:
+ - The access control record operation.
+ volume:
+ required: True
+ type: str
+ description:
+ - The name of the volume that this access control record applies to.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage access control records
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# If state is "create", create access control record for given volume, fails if it exist.
+# if state is present, create access control record if not already present.
+- name: Create access control record for volume
+ hpe.nimble.hpe_nimble_access_control_record:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ volume: "{{ volume }}"
+ initiator_group: "{{ initiator_group }}"
+ state: "{{ state | default('present') }}"
+
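+# Hypothetical values shown; apply_to, lun and chap_user are optional record
+# attributes that can be set at creation time.
+- name: Create access control record applying only to the volume with an explicit LUN
+  hpe.nimble.hpe_nimble_access_control_record:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    volume: "{{ volume }}"
+    initiator_group: "{{ initiator_group }}"
+    apply_to: "volume"
+    lun: 7
+    state: present
+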
+# Delete the access control record for a given volume name
+- name: Delete access control record for volume
+ hpe.nimble.hpe_nimble_access_control_record:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ volume: "{{ volume }}"
+ initiator_group: "{{ initiator_group }}"
+ state: "absent" # fail if volume does not exist
+
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_acr(
+ client_obj,
+ state,
+ initiator_group,
+ volume,
+ **kwargs):
+
+ if utils.is_null_or_empty(initiator_group):
+ return (False, False, "Access control record creation failed. No initiator group provided.", {})
+ if utils.is_null_or_empty(volume):
+ return (False, False, "Access control record creation failed. No volume name provided.", {})
+
+ try:
+ # see if the igroup is already present
+ ig_resp = client_obj.initiator_groups.get(id=None, name=initiator_group)
+ if ig_resp is None:
+ return (False, False, f"Initiator Group '{initiator_group}' is not present on array.", {})
+ vol_resp = client_obj.volumes.get(id=None, name=volume)
+ if vol_resp is None:
+ return (False, False, f"Volume name '{volume}' is not present on array.", {})
+
+ acr_resp = client_obj.access_control_records.get(vol_name=volume, initiator_group_name=initiator_group, apply_to=kwargs['apply_to'])
+ if utils.is_null_or_empty(acr_resp) is False:
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(acr_resp, **kwargs)
+ else:
+ params = utils.remove_null_args(**kwargs)
+ if acr_resp is None or changed_attrs_dict.__len__() > 0:
+ acr_resp = client_obj.access_control_records.create(initiator_group_id=ig_resp.attrs.get("id"),
+ vol_id=vol_resp.attrs.get("id"),
+ **params)
+ return (True, True, "Successfully created access control record.", acr_resp.attrs)
+ else:
+ # if state is set to present, we pass
+ if state == "present":
+ return (True, False, f"Access control record for volume '{volume}' with initiator group '{initiator_group}' is already present.",
+ acr_resp.attrs)
+ return (False, False, f"Access control record for volume '{volume}' with initiator group '{initiator_group}' cannot "
+ "be created as it is already present.", {})
+ except Exception as ex:
+ return (False, False, f"Access control record creation failed | {ex}", {})
+
+
+def delete_acr(
+ client_obj,
+ initiator_group,
+ volume,
+ **kwargs):
+
+ if utils.is_null_or_empty(initiator_group):
+ return (False, False, "Access control record deletion failed. No initiator group provided.")
+ if utils.is_null_or_empty(volume):
+ return (False, False, "Access control record deletion failed. No volume provided.")
+ params = utils.remove_null_args(**kwargs)
+
+ try:
+ acr_list_resp = client_obj.access_control_records.list(vol_name=volume, initiator_group_name=initiator_group, **params)
+ if acr_list_resp is not None and acr_list_resp.__len__() > 0:
+ for acr_resp in acr_list_resp:
+ client_obj.access_control_records.delete(acr_resp.attrs.get("id"))
+ return (True, True, f"Successfully deleted access control record for initiator group '{initiator_group}' associated with volume '{volume}'.")
+ else:
+ return (True, False, f"No access control record for initiator group '{initiator_group}' associated with volume '{volume}' found.")
+ except Exception as ex:
+ return (False, False, f"Access control record deletion failed | {ex}")
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['present', 'absent', 'create'],
+ "type": "str"
+ },
+ "apply_to": {
+ "required": False,
+ "choices": ['volume', 'snapshot', 'both'],
+ "type": "str"
+ },
+ "chap_user": {
+ "required": False,
+ "type": "str"
+ },
+ "lun": {
+ "required": False,
+ "type": "int"
+ },
+ "volume": {
+ "required": True,
+ "type": "str"
+ },
+ "initiator_group": {
+ "required": True,
+ "type": "str"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+
+ module = AnsibleModule(argument_spec=fields)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ apply_to = module.params["apply_to"]
+ chap_user = module.params["chap_user"]
+ lun = module.params["lun"]
+ volume = module.params["volume"]
+ initiator_group = module.params["initiator_group"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "create" or state == "present":
+ return_status, changed, msg, resp = create_acr(
+ client_obj,
+ state,
+ initiator_group,
+ volume,
+ apply_to=apply_to,
+ chap_user_id=utils.get_chap_user_id(client_obj, chap_user),
+ lun=lun)
+
+ elif state == "absent":
+ return_status, changed, msg = delete_acr(
+ client_obj,
+ initiator_group,
+ volume,
+ chap_user=chap_user)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_array.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_array.py
new file mode 100644
index 00000000..71f1c413
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_array.py
@@ -0,0 +1,468 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the array on an HPE Nimble Storage group.
+module: hpe_nimble_array
+options:
+ allow_lower_limits:
+ required: False
+ type: bool
+ description:
+ - A True setting will allow you to add an array with lower limits to a pool with higher limits.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change the name of the existing array.
+ create_pool:
+ required: False
+ type: bool
+ description:
+ - Whether to create an associated pool during the array creation.
+ ctrlr_a_support_ip:
+ required: False
+ type: str
+ description:
+ - Controller A Support IP Address. Four numbers in the range (0,255) separated by periods.
+ ctrlr_b_support_ip:
+ required: False
+ type: str
+ description:
+ - Controller B Support IP Address. Four numbers in the range (0,255) separated by periods.
+ failover:
+ required: False
+ type: bool
+ description:
+ - Perform a failover on the specified array.
+ force:
+ required: False
+ type: bool
+ description:
+ - Forcibly delete the specified array.
+ halt:
+ required: False
+ type: bool
+ description:
+ - Halt the specified array. Restarting the array will require physically powering it back on.
+ name:
+ required: True
+ type: str
+ description:
+ - The user-provided name of the array. It is also the array's hostname.
+ nic_list:
+ required: False
+ type: list
+ elements: dict
+ description:
+ - List NICs information. Used when creating an array.
+ pool_description:
+ required: False
+ type: str
+ description:
+ - Text description of the pool to be created during array creation.
+ pool_name:
+ required: False
+ type: str
+ description:
+ - Name of pool to which this is a member.
+ reboot:
+ required: False
+ type: bool
+ description:
+ - Reboot the specified array.
+ secondary_mgmt_ip:
+ required: False
+ type: str
+ description:
+ - Secondary management IP address for the group.
+ serial:
+ required: False
+ type: str
+ description:
+ - Serial number of the array.
+ state:
+ required: True
+ choices:
+ - create
+ - present
+ - absent
+ type: str
+ description:
+ - The array operation
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage array
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create an array if not present; fails if already present.
+# if state is present, then create an array if not present; succeeds if it already exists.
+- name: Create array if not present
+ hpe.nimble.hpe_nimble_array:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: "{{ state | default('present') }}"
+ name: "{{ name }}"
+    ctrlr_b_support_ip: "{{ ctrlr_b_support_ip | mandatory }}"
+    ctrlr_a_support_ip: "{{ ctrlr_a_support_ip | mandatory }}"
+    serial: "{{ serial | mandatory }}"
+    nic_list: "{{ nic_list | mandatory }}"
+    pool_name: "{{ pool_name | mandatory }}"
+
+- name: Delete array
+ hpe.nimble.hpe_nimble_array:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: absent
+
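+# Rename an existing array in place ('new_name' is a placeholder variable).
+- name: Rename array
+  hpe.nimble.hpe_nimble_array:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    change_name: "{{ new_name }}"
+    state: present
+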
+- name: Failover array
+ hpe.nimble.hpe_nimble_array:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ failover: true
+ state: present
+
+- name: Halt array
+ hpe.nimble.hpe_nimble_array:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: present
+ halt: true
+
+- name: Reboot array
+ hpe.nimble.hpe_nimble_array:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: present
+ reboot: true
+
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_array(
+ client_obj,
+ array_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(array_name):
+ return (False, False, "Create array failed as array name is not present.", {}, {})
+
+ try:
+ array_resp = client_obj.arrays.get(id=None, name=array_name)
+ if utils.is_null_or_empty(array_resp):
+ params = utils.remove_null_args(**kwargs)
+ array_resp = client_obj.arrays.create(name=array_name, **params)
+ if array_resp is not None:
+ return (True, True, f"Created array '{array_name}' successfully.", {}, array_resp.attrs)
+ else:
+ return (False, False, f"Array '{array_name}' cannot be created as it is already present", {}, array_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Array creation failed |{ex}", {}, {})
+
+
+def update_array(
+ client_obj,
+ array_resp,
+ **kwargs):
+
+ if utils.is_null_or_empty(array_resp):
+ return (False, False, "Update array failed as array name is not present.", {}, {})
+
+ try:
+ array_name = array_resp.attrs.get("name")
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(array_resp, **kwargs)
+ if changed_attrs_dict.__len__() > 0:
+ array_resp = client_obj.arrays.update(id=array_resp.attrs.get("id"), **params)
+ return (True, True, f"Array '{array_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, array_resp.attrs)
+ else:
+ return (True, False, f"Array '{array_name}' already present in given state.", {}, array_resp.attrs)
+
+ except Exception as ex:
+ return (False, False, f"Array update failed |{ex}", {}, {})
+
+
+def delete_array(
+ client_obj,
+ array_name):
+
+ if utils.is_null_or_empty(array_name):
+ return (False, False, "Delete array failed as array name is not present.", {})
+
+ try:
+ array_resp = client_obj.arrays.get(id=None, name=array_name)
+ if utils.is_null_or_empty(array_resp):
+ return (False, False, f"Array '{array_name}' cannot be deleted as it is not present.", {})
+ else:
+ array_resp = client_obj.arrays.delete(id=array_resp.attrs.get("id"))
+ return (True, True, f"Deleted array '{array_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Array deletion failed |{ex}", {})
+
+
+def failover_array(
+ client_obj,
+ array_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(array_name):
+ return (False, False, "Failover array failed as array name is not present.", {})
+ try:
+ array_resp = client_obj.arrays.get(id=None, name=array_name)
+ if utils.is_null_or_empty(array_resp):
+ return (False, False, f"Array '{array_name}' cannot failover as it is not present.", {})
+ else:
+ params = utils.remove_null_args(**kwargs)
+ array_resp = client_obj.arrays.failover(id=array_resp.attrs.get("id"), **params)
+ return (True, True, f"Failover array '{array_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Array failover failed |{ex}", {})
+
+
+def halt_array(
+ client_obj,
+ array_name):
+
+ if utils.is_null_or_empty(array_name):
+ return (False, False, "Halt array failed as array name is not present.", {})
+
+ try:
+ array_resp = client_obj.arrays.get(id=None, name=array_name)
+ if utils.is_null_or_empty(array_resp):
+ return (False, False, f"Array '{array_name}' cannot be halted as it is not present.", {})
+ else:
+ array_resp = client_obj.arrays.halt(id=array_resp.attrs.get("id"))
+ return (True, True, f"Halted array '{array_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Array Halt failed |{ex}", {})
+
+
+def reboot_array(
+ client_obj,
+ array_name):
+
+ if utils.is_null_or_empty(array_name):
+ return (False, False, "Reboot array failed as array name is not present.", {})
+
+ try:
+ array_resp = client_obj.arrays.get(id=None, name=array_name)
+ if utils.is_null_or_empty(array_resp):
+ return (False, False, f"Array '{array_name}' cannot be rebooted as it is not present.", {})
+ else:
+ array_resp = client_obj.arrays.reboot(id=array_resp.attrs.get("id"))
+ return (True, True, f"Rebooted array '{array_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Array reboot failed |{ex}", {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['create', 'present', 'absent'],
+ "type": "str"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "pool_name": {
+ "required": False,
+ "type": "str"
+ },
+ "serial": {
+ "required": False,
+ "type": "str"
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "create_pool": {
+ "required": False,
+ "type": "bool"
+ },
+ "pool_description": {
+ "required": False,
+ "type": "str"
+ },
+ "allow_lower_limits": {
+ "required": False,
+ "type": "bool"
+ },
+ "ctrlr_a_support_ip": {
+ "required": False,
+ "type": "str"
+ },
+ "ctrlr_b_support_ip": {
+ "required": False,
+ "type": "str"
+ },
+ "nic_list": {
+ "required": False,
+ "type": 'list',
+ "elements": 'dict'
+ },
+ "secondary_mgmt_ip": {
+ "required": False,
+ "type": "str"
+ },
+ "force": {
+ "required": False,
+ "type": "bool"
+ },
+ "failover": {
+ "required": False,
+ "type": "bool"
+ },
+ "halt": {
+ "required": False,
+ "type": "bool"
+ },
+ "reboot": {
+ "required": False,
+ "type": "bool"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ module = AnsibleModule(argument_spec=fields)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ array_name = module.params["name"]
+ change_name = module.params["change_name"]
+ pool_name = module.params["pool_name"]
+ serial = module.params["serial"]
+ create_pool = module.params["create_pool"]
+ pool_description = module.params["pool_description"]
+ allow_lower_limits = module.params["allow_lower_limits"]
+ ctrlr_a_support_ip = module.params["ctrlr_a_support_ip"]
+ ctrlr_b_support_ip = module.params["ctrlr_b_support_ip"]
+ nic_list = module.params["nic_list"]
+ secondary_mgmt_ip = module.params["secondary_mgmt_ip"]
+ force = module.params["force"]
+ failover = module.params["failover"]
+ halt = module.params["halt"]
+ reboot = module.params["reboot"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "present" and failover is True:
+ return_status, changed, msg, changed_attrs_dict = failover_array(client_obj, array_name, force=force)
+
+ elif state == "present" and halt is True:
+ return_status, changed, msg, changed_attrs_dict = halt_array(client_obj, array_name)
+
+ elif state == "present" and reboot is True:
+ return_status, changed, msg, changed_attrs_dict = reboot_array(client_obj, array_name)
+
+ elif ((failover is None or failover is False)
+ and (halt is None or halt is False)
+ and (reboot is None or reboot is False)
+ and (state == "create" or state == "present")):
+
+ array_resp = client_obj.arrays.get(name=array_name)
+ if array_resp is None or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_array(
+ client_obj,
+ array_name,
+ pool_name=pool_name,
+ serial=serial,
+ create_pool=create_pool,
+ pool_description=pool_description,
+ allow_lower_limits=allow_lower_limits,
+ ctrlr_a_support_ip=ctrlr_a_support_ip,
+ ctrlr_b_support_ip=ctrlr_b_support_ip,
+ nic_list=nic_list,
+ secondary_mgmt_ip=secondary_mgmt_ip,
+ force=force,
+ failover=failover,
+ halt=halt,
+ reboot=reboot)
+ else:
+ return_status, changed, msg, changed_attrs_dict, resp = update_array(
+ client_obj,
+ array_resp,
+ name=change_name,
+ force=force)
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_array(client_obj, array_name)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_chap_user.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_chap_user.py
new file mode 100644
index 00000000..2e75898b
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_chap_user.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the CHAP user on an HPE Nimble Storage group.
+module: hpe_nimble_chap_user
+options:
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change the name of the existing CHAP user.
+ description:
+ required: False
+ type: str
+ description:
+ - Text description of CHAP user.
+ initiator_iqns:
+ required: False
+ type: list
+ elements: dict
+ description:
+    - List of iSCSI initiators to be configured with this CHAP user for iSCSI Group Target CHAP
+      authentication. This attribute cannot be modified at the same time as other attributes. If any
+      specified initiator is already associated with another CHAP user, it will be replaced by this
+      CHAP user for future CHAP authentication.
+ name:
+ required: True
+ type: str
+ description:
+ - The CHAP user name.
+ state:
+ required: True
+ choices:
+ - create
+ - present
+ - absent
+ type: str
+ description:
+ - The CHAP user operation.
+ user_password:
+ required: False
+ type: str
+ description:
+    - CHAP secret. A string of 12 to 16 printable ASCII characters; it cannot contain
+      spaces or most punctuation (ampersand and the characters ^[];` are excluded).
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage CHAP user
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create the chap user; fails if it exists or cannot be created
+# if state is present, then create the chap user if not present; otherwise succeed
+- name: Create Chap User
+ hpe.nimble.hpe_nimble_chap_user:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ description: "{{ description }}"
+ user_password: "{{ user_password | mandatory }}"
+ state: "{{ state | default('present') }}"
+
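+# With state 'present' an existing user is updated in place, e.g. to rotate
+# the CHAP secret ('new_user_password' is a placeholder variable).
+- name: Update Chap User
+  hpe.nimble.hpe_nimble_chap_user:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    user_password: "{{ new_user_password }}"
+    state: present
+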
+- name: Delete Chap User
+ hpe.nimble.hpe_nimble_chap_user:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: "absent"
+
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_chap_user(
+ client_obj,
+ user_name,
+ password,
+ **kwargs):
+
+ if utils.is_null_or_empty(user_name):
+ return (False, False, "Create chap user failed as user is not present.", {}, {})
+ if utils.is_null_or_empty(password):
+ return (False, False, "Create chap user failed as password is not present.", {}, {})
+
+ try:
+ user_resp = client_obj.chap_users.get(id=None, name=user_name)
+ if utils.is_null_or_empty(user_resp):
+ params = utils.remove_null_args(**kwargs)
+ user_resp = client_obj.chap_users.create(name=user_name, password=password, **params)
+ return (True, True, f"Chap user '{user_name}' created successfully.", {}, user_resp.attrs)
+ else:
+ return (False, False, f"Chap user '{user_name}' cannot be created as it is already present in given state.", {}, user_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Chap user creation failed |{ex}", {}, {})
+
+
+def update_chap_user(
+ client_obj,
+ user_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(user_name):
+ return (False, False, "Update chap user failed as user is not present.", {}, {})
+
+ try:
+ user_resp = client_obj.chap_users.get(id=None, name=user_name)
+ if utils.is_null_or_empty(user_resp):
+ return (False, False, f"Chap user '{user_name}' cannot be updated as it is not present.", {}, {})
+
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(user_resp, **kwargs)
+ if changed_attrs_dict.__len__() > 0:
+ user_resp = client_obj.chap_users.update(id=user_resp.attrs.get("id"), **params)
+ return (True, True, f"Chap user '{user_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, user_resp.attrs)
+ else:
+ return (True, False, f"Chap user '{user_resp.attrs.get('name')}' already present in given state.", {}, user_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Chap user update failed |{ex}", {}, {})
+
+
+def delete_chap_user(
+ client_obj,
+ user_name):
+
+ if utils.is_null_or_empty(user_name):
+ return (False, False, "Delete chap user failed as user is not present.", {})
+
+ try:
+ user_resp = client_obj.chap_users.get(id=None, name=user_name)
+ if utils.is_null_or_empty(user_resp):
+ return (False, False, f"Chap user '{user_name}' cannot be deleted as it is not present.", {})
+
+ client_obj.chap_users.delete(id=user_resp.attrs.get("id"))
+ return (True, True, f"Deleted chap user '{user_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Delete chap user failed |{ex}", {})
+
+
+def main():
+
+ fields = {
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "initiator_iqns": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "user_password": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "state": {
+ "required": True,
+ "choices": ['create',
+ 'present',
+ 'absent'
+ ],
+ "type": "str"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ required_if = [('state', 'create', ['user_password'])]
+
+ module = AnsibleModule(argument_spec=fields, required_if=required_if)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ change_name = module.params["change_name"]
+ description = module.params["description"]
+ initiator_iqns = module.params["initiator_iqns"]
+ user_name = module.params["name"]
+ user_password = module.params["user_password"]
+ state = module.params["state"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "create" or state == "present":
+ if not client_obj.chap_users.get(id=None, name=user_name) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_chap_user(
+ client_obj,
+ user_name,
+ user_password,
+ description=description)
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_chap_user(
+ client_obj,
+ user_name,
+ name=change_name,
+ password=user_password,
+ description=description,
+ initiator_iqns=initiator_iqns)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_chap_user(client_obj, user_name)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_disk.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_disk.py
new file mode 100644
index 00000000..3d5e90d1
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_disk.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage disks on an HPE Nimble Storage group.
+module: hpe_nimble_disk
+options:
+ disk_op:
+ required: True
+ choices:
+ - add
+ - remove
+ type: str
+ description:
+ - The intended operation to be performed on the specified disk.
+ force:
+ type: bool
+ description:
+ - Forcibly add a disk.
+ shelf_location:
+ required: True
+ type: str
+ description:
+ - Position of the shelf the disk belongs to.
+ slot:
+ required: True
+ type: int
+ description:
+ - Disk slot number.
+ state:
+ required: True
+ choices:
+ - present
+ type: str
+ description:
+ - The disk operation.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage disk
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+- name: Update Disk
+ hpe.nimble.hpe_nimble_disk:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ slot: "{{ slot | mandatory }}"
+ shelf_location: "{{ shelf_location | mandatory }}"
+ disk_op: "{{ disk_op | mandatory }}"
+ force: "{{ force }}"
+ state: present
+
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def update_disk(
+ client_obj,
+ slot,
+ shelf_location,
+ **kwargs):
+
+ if utils.is_null_or_empty(shelf_location):
+ return (False, False, "Disk update failed as no shelf location provided.", {}, {})
+
+ try:
+ # get the details of the disk for a given slot and shelf_location
+ disk_resp = client_obj.disks.list(detail=True)
+ if disk_resp is None:
+ return (False, False, "No Disk is present on array.", {}, {})
+ else:
+ disk_id = None
+ changed_attrs_dict = {}
+ for disk_obj in disk_resp:
+ if slot == disk_obj.attrs.get("slot") and shelf_location == disk_obj.attrs.get("shelf_location"):
+ disk_id = disk_obj.attrs.get("id")
+ break
+            if disk_id is None:
+                return (False, False, f"No disk found in slot '{slot}' at shelf location '{shelf_location}'.", {}, {})
+            params = utils.remove_null_args(**kwargs)
+ disk_resp = client_obj.disks.update(id=disk_id, **params)
+ if hasattr(disk_resp, 'attrs'):
+ disk_resp = disk_resp.attrs
+ changed_attrs_dict['slot'] = slot
+ changed_attrs_dict['shelf_location'] = shelf_location
+ return (True, True, f"Successfully updated disk to slot '{slot}' at shelf location '{shelf_location}'.", changed_attrs_dict, disk_resp)
+ except Exception as ex:
+ return (False, False, f"Disk update failed |'{ex}'", {}, {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['present'],
+ "type": "str"
+ },
+ "disk_op": {
+ "required": True,
+ "choices": ['add', 'remove'],
+ "type": "str"
+ },
+ "slot": {
+ "required": True,
+ "type": "int"
+ },
+ "shelf_location": {
+ "required": True,
+ "type": "str"
+ },
+ "force": {
+ "required": False,
+ "type": "bool"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ module = AnsibleModule(argument_spec=fields)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ disk_op = module.params["disk_op"]
+ slot = module.params["slot"]
+ shelf_location = module.params["shelf_location"]
+ force = module.params["force"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "present":
+ return_status, changed, msg, changed_attrs_dict, resp = update_disk(
+ client_obj,
+ slot,
+ shelf_location,
+ disk_op=disk_op,
+ force=force)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_encryption.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_encryption.py
new file mode 100644
index 00000000..c2c96c38
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_encryption.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the encryption on an HPE Nimble Storage group.
+module: hpe_nimble_encryption
+options:
+ active:
+ type: bool
+ description:
+ - Whether the master key is active or not.
+ age:
+ required: False
+ type: int
+ description:
+ - Minimum age (in hours) of inactive encryption keys to be purged. '0' indicates to purge the keys immediately.
+ encryption_config:
+ required: False
+ type: dict
+ description:
+    - Group encryption settings, i.e. how encryption is configured for this group.
+ group_encrypt:
+ required: False
+ type: bool
+ description:
+ - Flag for setting group encryption.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the master key. The only allowed value is "default".
+ passphrase:
+ required: False
+ type: str
+ description:
+    - Passphrase used to protect the master key. Required when creating, enabling, or disabling
+      the key, and when changing the passphrase to a new value. String with size from 8 to 64 printable characters.
+ purge_inactive:
+ required: False
+ type: bool
+ description:
+    - Purges encryption keys that have been inactive for the specified age or longer. If you do not specify an age, the keys are purged immediately.
+ new_passphrase:
+ required: False
+ type: str
+ description:
+ - When changing the passphrase, this attribute specifies the new value of the passphrase. String with size from 8 to 64 printable characters.
+ state:
+ required: True
+ choices:
+ - create
+ - present
+ - absent
+ type: str
+ description:
+ - The encryption operation.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage encryption
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create the master key; fails if it exists or cannot be created
+# if state is present, then create the master key if not present; otherwise succeed
+- name: Create master key
+ hpe.nimble.hpe_nimble_encryption:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "default"
+ passphrase: "{{ passphrase }}"
+ active: "{{ active | default('false') }}"
+ state: "{{ state | default('present') }}"
+
+- name: Delete master key
+ hpe.nimble.hpe_nimble_encryption:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "default"
+ state: "absent"
+
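+# Rotate the passphrase protecting the master key ('new_passphrase' is a
+# placeholder variable).
+- name: Change master key passphrase
+  hpe.nimble.hpe_nimble_encryption:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "default"
+    passphrase: "{{ passphrase }}"
+    new_passphrase: "{{ new_passphrase }}"
+    state: "present"
+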
+- name: Purge inactive master key
+ hpe.nimble.hpe_nimble_encryption:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "default"
+ age: "{{ age | mandatory }}"
+ state: "present"
+ purge_inactive: true
+
+- name: Group encryption
+ hpe.nimble.hpe_nimble_encryption:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ encryption_config: "{{ encryption_config | mandatory }}"
+ state: "present"
+ group_encrypt: true
+
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_master_key(
+ client_obj,
+ master_key,
+ passphrase):
+
+ if utils.is_null_or_empty(master_key):
+ return (False, False, "Create master key failed as no key is provided.", {}, {})
+
+ try:
+ master_key_resp = client_obj.master_key.get(id=None, name=master_key)
+ if utils.is_null_or_empty(master_key_resp):
+ master_key_resp = client_obj.master_key.create(name=master_key, passphrase=passphrase)
+ return (True, True, f"Master key '{master_key}' created successfully.", {}, master_key_resp.attrs)
+ else:
+ return (False, False, f"Master key '{master_key}' cannot be created as it is already present in given state.", {}, master_key_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Master key creation failed |{ex}", {}, {})
+
+
+def update_master_key(
+ client_obj,
+ master_key,
+ **kwargs):
+
+ if utils.is_null_or_empty(master_key):
+ return (False, False, "Update master key failed as master key is not present.", {}, {})
+
+ try:
+ master_key_resp = client_obj.master_key.get(id=None, name=master_key)
+ if utils.is_null_or_empty(master_key_resp):
+ return (False, False, f"Master key '{master_key}' cannot be updated as it is not present.", {}, {})
+
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(master_key_resp, **kwargs)
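+ # exclude the passphrase from the reported change set; it is write-only
+ # (no_log) and, presumably, never echoed back by the array for comparison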
+ changed_attrs_dict.pop('passphrase', None)
+ if len(changed_attrs_dict) > 0:
+ master_key_resp = client_obj.master_key.update(id=master_key_resp.attrs.get("id"), name=master_key, **params)
+ return (True, True, f"Master key '{master_key}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, master_key_resp.attrs)
+ else:
+ return (True, False, f"Master key '{master_key}' already present in given state.", {}, master_key_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Master key update failed |{ex}", {}, {})
+
+
+def delete_master_key(
+ client_obj,
+ master_key):
+
+ if utils.is_null_or_empty(master_key):
+ return (False, False, "Delete master key failed as master key is not present.", {})
+
+ try:
+ master_key_resp = client_obj.master_key.get(id=None, name=master_key)
+ if utils.is_null_or_empty(master_key_resp):
+ return (False, False, f"Master key '{master_key}' cannot be deleted as it is not present.", {})
+
+ client_obj.master_key.delete(id=master_key_resp.attrs.get("id"))
+ return (True, True, f"Deleted master key '{master_key}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Delete master key failed |{ex}", {})
+
+
+def purge_inactive_key(
+ client_obj,
+ master_key,
+ **kwargs):
+
+ if utils.is_null_or_empty(master_key):
+ return (False, False, "Purge inactive master key failed as master key is not present.", {})
+
+ try:
+ master_key_resp = client_obj.master_key.get(id=None, name=master_key)
+ if utils.is_null_or_empty(master_key_resp):
+ return (False, False, f"Master key '{master_key}' cannot be purged as it is not present.", {})
+
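+ # 'age' is optional; remove_null_args drops it when unset, in which case
+ # the array purges inactive keys immediately (per the 'age' option docs)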
+ params = utils.remove_null_args(**kwargs)
+ client_obj.master_key.purge_inactive(id=master_key_resp.attrs.get("id"), **params)
+ return (True, True, f"Purged inactive master key '{master_key}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Purge inactive master key failed |{ex}", {})
+
+
+def group_encryption(
+ client_obj,
+ group_name,
+ encryption_config):
+
+ if utils.is_null_or_empty(group_name):
+ return (False, False, "Encryption setting for group failed as group name is not present.", {}, {})
+
+ try:
+ group_resp = client_obj.groups.get(id=None, name=group_name)
+ if utils.is_null_or_empty(group_resp):
+ return (False, False, f"Encryption setting for group '{group_name}' cannot be done as it is not present.", {}, {})
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(group_resp, encryption_config=encryption_config)
+ if len(changed_attrs_dict) > 0:
+ group_resp = client_obj.groups.update(id=group_resp.attrs.get("id"), encryption_config=encryption_config)
+ return (True, True, f"Encryption setting for group '{group_name}' changed successfully. ", changed_attrs_dict, group_resp.attrs)
+ else:
+ return (True, False, f"Encryption setting for group '{group_resp.attrs.get('name')}' is already in same state.", {}, group_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Encryption setting for group failed |{ex}", {}, {})
+
+
+def main():
+
+ fields = {
+ "active": {
+ "required": False,
+ "type": "bool"
+ },
+ "age": {
+ "required": False,
+ "type": "int"
+ },
+ "encryption_config": {
+ "required": False,
+ "type": "dict"
+ },
+ "group_encrypt": {
+ "required": False,
+ "type": "bool"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "passphrase": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "purge_inactive": {
+ "required": False,
+ "type": "bool"
+ },
+ "new_passphrase": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "state": {
+ "required": True,
+ "choices": ['create',
+ 'present',
+ 'absent'
+ ],
+ "type": "str"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ required_if = [('state', 'create', ['passphrase'])]
+
+ module = AnsibleModule(argument_spec=fields, required_if=required_if)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ active = module.params["active"]
+ age = module.params["age"]
+ encryption_config = module.params["encryption_config"]
+ group_encrypt = module.params["group_encrypt"]
+ master_key = module.params["name"]
+ passphrase = module.params["passphrase"]
+ purge_inactive = module.params["purge_inactive"]
+ new_passphrase = module.params["new_passphrase"]
+ state = module.params["state"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if ((purge_inactive is None or purge_inactive is False)
+ and (group_encrypt is None or group_encrypt is False)
+ and (state == "create" or state == "present")):
+ if not client_obj.master_key.get(id=None, name=master_key) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_master_key(
+ client_obj,
+ master_key,
+ passphrase)
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_master_key(
+ client_obj,
+ master_key,
+ active=active,
+ passphrase=passphrase,
+ new_passphrase=new_passphrase)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_master_key(client_obj, master_key)
+
+ elif state == "present" and purge_inactive is True:
+ return_status, changed, msg, changed_attrs_dict = purge_inactive_key(
+ client_obj,
+ master_key,
+ age=age)
+
+ elif state == "present" and group_encrypt is True:
+ group_name = module.params["name"]
+ return_status, changed, msg, changed_attrs_dict, resp = group_encryption(
+ client_obj,
+ group_name,
+ encryption_config)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_fc.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_fc.py
new file mode 100644
index 00000000..0c69d0fb
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_fc.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage storage Fibre Channel on an HPE Nimble Storage group.
+module: hpe_nimble_fc
+options:
+ array_name_or_serial:
+ required: True
+ type: str
+ description:
+ - Name or serial number of array where the interface is hosted.
+ controller:
+ required: False
+ type: str
+ description:
+ - Name (A or B) of the controller where the interface is hosted.
+ hw_upgrade:
+ required: False
+ type: bool
+ description:
+ - Update fibre channel configuration after hardware changes. Possible values are 'true' and 'false'.
+ name:
+ required: False
+ type: str
+ description:
+ - Name of the fibre channel interface.
+ online:
+ required: False
+ type: bool
+ description:
+ - Identify whether the fibre channel interface is online. Possible values are 'true' and 'false'.
+ precheck:
+ required: False
+ type: bool
+ description:
+ - Check if the interfaces are offline before regenerating the WWNN. Possible values are 'true' and 'false'.
+ regenerate:
+ required: False
+ type: bool
+ description:
+ - Regenerate fibre channel configuration. Possible values are 'true' and 'false'.
+ state:
+ required: True
+ choices:
+ - present
+ type: str
+ description:
+ - The fibre channel operation.
+ wwnn_base_str:
+ required: False
+ type: str
+ description:
+ - Base WWNN. Six bytes expressed in hex, separated by colons. Example: 'af:32:f1'.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage Fibre Channel
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+- name: Update fibre channel interface
+ hpe.nimble.hpe_nimble_fc:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ array_name_or_serial: "{{ array_name_or_serial | mandatory }}"
+ name: "{{ name | mandatory }}"
+ controller: "{{ controller | mandatory }}"
+ online: "{{ online | mandatory }}"
+ state: "{{ 'present' }}"
+
+- name: Regenerate fibre channel config
+ hpe.nimble.hpe_nimble_fc:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ array_name_or_serial: "{{ array_name_or_serial | mandatory }}" # provide the group_leader_array name
+ wwnn_base_str: "{{ wwnn_base_str | mandatory }}"
+ regenerate: true
+ precheck: true
+ state: "{{ 'present' }}"
+
+- name: Hardware upgrade for fibre channel
+ hpe.nimble.hpe_nimble_fc:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ array_name_or_serial: "{{ array_name_or_serial | mandatory }}"
+ hw_upgrade: true
+ state: "{{ 'present' }}"
+
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def update_fc_interface(
+ client_obj,
+ array_name_or_serial,
+ fc_name,
+ controller,
+ online):
+
+ if utils.is_null_or_empty(array_name_or_serial):
+ return (False, False, "Fibre channel interface update failed as no array name is provided.", {}, {})
+ if utils.is_null_or_empty(fc_name):
+ return (False, False, "Fibre channel interface update failed as no interface name is provided.", {}, {})
+ if utils.is_null_or_empty(controller):
+ return (False, False, "Fibre channel interface update failed as no controller name is provided.", {}, {})
+ if utils.is_null_or_empty(online):
+ return (False, False, "Fibre channel interface update failed as online flag is not provided.", {}, {})
+ try:
+ # get the details of the fc
+ fc_resp = client_obj.fibre_channel_interfaces.list(detail=True, array_name_or_serial=array_name_or_serial)
+ if fc_resp is None:
+ return (False, False, f"No fibre channel is present for array '{array_name_or_serial}'.", {}, {})
+ else:
+ fc_result = None
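+ # the list call returns every interface on the array, so filter
+ # client-side for the matching interface name and controller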
+ for fc_interface_obj in fc_resp:
+ if fc_interface_obj.attrs.get("name") == fc_name and fc_interface_obj.attrs.get("controller_name") == controller:
+ fc_result = fc_interface_obj
+ break
+ if fc_result is not None:
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(fc_result, online=online)
+ if len(changed_attrs_dict) > 0:
+ fc_result = client_obj.fibre_channel_interfaces.update(id=fc_result.attrs.get("id"), online=online)
+ if hasattr(fc_result, 'attrs'):
+ fc_result = fc_result.attrs
+ return (True, True, "Updated fibre channel interface.", {}, fc_result)
+ else:
+ if hasattr(fc_result, 'attrs'):
+ fc_result = fc_result.attrs
+ return (True, False, "Fibre channel interface already in given state.", {}, fc_result)
+ except Exception as ex:
+ return (False, False, f"Fibre channel update failed |'{ex}'", {}, {})
+
+
+def regenerate_wwn(
+ client_obj,
+ array_name_or_serial,
+ wwnn_base_str,
+ precheck):
+
+ if utils.is_null_or_empty(array_name_or_serial):
+ return (False, False, "Fibre channel config update failed as no array name is provided.", {}, {})
+ try:
+ # get the details of the fc config
+ fc_config_resp = client_obj.fibre_channel_configs.get(id=None, group_leader_array=array_name_or_serial)
+ if fc_config_resp is None:
+ return (False, False, f"No fibre channel config is present for array '{array_name_or_serial}'.", {}, {})
+ else:
+ changed_attrs_dict = {}
+ fc_config_resp = client_obj.fibre_channel_configs.regenerate(fc_config_resp.attrs.get("id"), precheck, wwnn_base_str)
+ if hasattr(fc_config_resp, 'attrs'):
+ fc_config_resp = fc_config_resp.attrs
+ changed_attrs_dict['wwnn_base_str'] = wwnn_base_str
+ return (True, True, f"Updated fibre channel config for group leader array '{array_name_or_serial}'."
+ f"Modified the following fields '{changed_attrs_dict}'.", changed_attrs_dict, fc_config_resp)
+ except Exception as ex:
+ return (False, False, f"Fibre channel config update failed |'{ex}'", {}, {})
+
+
+def upgrade_hardware(
+ client_obj,
+ array_name_or_serial):
+
+ if utils.is_null_or_empty(array_name_or_serial):
+ return (False, False, "Hardware update failed as no array name is provided.", {}, {})
+ try:
+ # get the details of the fc config
+ fc_config_resp = client_obj.fibre_channel_configs.get(id=None, group_leader_array=array_name_or_serial)
+ if fc_config_resp is None:
+ return (False, False, f"No fibre channel config is present for array '{array_name_or_serial}'.", {}, {})
+ else:
+ fc_config_resp = client_obj.fibre_channel_configs.hw_upgrade(fc_config_resp.attrs.get("id"))
+ if hasattr(fc_config_resp, 'attrs'):
+ fc_config_resp = fc_config_resp.attrs
+ return (True, True, f"Hardware update for group leader array '{array_name_or_serial}' done successfully", {}, fc_config_resp)
+ except Exception as ex:
+ return (False, False, f"Hardware update failed |'{ex}'", {}, {})
+
+
+def main():
+
+ fields = {
+ "array_name_or_serial": {
+ "required": True,
+ "type": "str"
+ },
+ "controller": {
+ "required": False,
+ "type": "str"
+ },
+ "hw_upgrade": {
+ "required": False,
+ "type": "bool"
+ },
+ "name": {
+ "required": False,
+ "type": "str"
+ },
+ "online": {
+ "required": False,
+ "type": "bool"
+ },
+ "precheck": {
+ "required": False,
+ "type": "bool"
+ },
+ "regenerate": {
+ "required": False,
+ "type": "bool"
+ },
+ "state": {
+ "required": True,
+ "choices": ['present'],
+ "type": "str"
+ },
+ "wwnn_base_str": {
+ "required": False,
+ "type": "str"
+ },
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ module = AnsibleModule(argument_spec=fields)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ array_name_or_serial = module.params["array_name_or_serial"]
+ controller = module.params["controller"]
+ hw_upgrade = module.params["hw_upgrade"]
+ fc_name = module.params["name"]
+ online = module.params["online"]
+ precheck = module.params["precheck"]
+ regenerate = module.params["regenerate"]
+ state = module.params["state"]
+ wwnn_base_str = module.params["wwnn_base_str"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
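+ # precedence within 'present': regenerate, then hw_upgrade,
+ # then a plain interface update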
+ if state == "present":
+ if regenerate is True:
+ return_status, changed, msg, changed_attrs_dict, resp = regenerate_wwn(
+ client_obj,
+ array_name_or_serial,
+ wwnn_base_str,
+ precheck)
+
+ elif hw_upgrade is True:
+ return_status, changed, msg, changed_attrs_dict, resp = upgrade_hardware(
+ client_obj,
+ array_name_or_serial)
+
+ else:
+ return_status, changed, msg, changed_attrs_dict, resp = update_fc_interface(
+ client_obj,
+ array_name_or_serial,
+ fc_name,
+ controller,
+ online)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_group.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_group.py
new file mode 100644
index 00000000..1c74b16a
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_group.py
@@ -0,0 +1,1257 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage an HPE Nimble Storage group on a Nimble Storage array.
+module: hpe_nimble_group
+options:
+ alarms:
+ required: False
+ type: bool
+ description:
+ - Whether alarm feature is enabled.
+ alert_to_email_addrs:
+ required: False
+ type: str
+ description:
+ - Comma-separated list of email addresses to receive emails.
+ alert_from_email_addrs:
+ required: False
+ type: str
+ description:
+ - From email address to use while sending emails. Case-insensitive email address.
+ alert_min_level:
+ required: False
+ choices:
+ - info
+ - notice
+ - warning
+ - critical
+ type: str
+ description:
+ - Minimum level of alert to be notified.
+ allow_analytics_gui:
+ required: False
+ type: bool
+ description:
+ - Specify whether to allow HPE Nimble Storage to use Google Analytics in the GUI. HPE Nimble Storage uses Google Analytics
+ to gather data related to GUI usage. The data gathered is used to evaluate and improve the product.
+ allow_support_tunnel:
+ required: False
+ type: bool
+ description:
+ - Whether to allow support tunnel.
+ auto_switchover:
+ required: False
+ type: bool
+ description:
+ - Whether automatic switchover of Group management services feature is enabled.
+ autoclean_unmanaged_snapshots:
+ required: False
+ type: bool
+ description:
+ - Whether autoclean unmanaged snapshots feature is enabled.
+ autoclean_unmanaged_snapshots_ttl_unit:
+ required: False
+ type: int
+ description:
+ - Unit for unmanaged snapshot time to live.
+ autosupport:
+ required: False
+ type: bool
+ description:
+ - Whether to send autosupport.
+ cc_mode:
+ required: False
+ type: bool
+ description:
+ - Enable or disable Common Criteria mode.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change name of the existing group.
+ check_migrate:
+ required: False
+ type: bool
+ description:
+ - Check if the group Management Service can be migrated to the group Management Service backup array.
+ date:
+ required: False
+ type: int
+ description:
+ - Unix epoch time local to the group, in seconds since the epoch. Example: 3400.
+ default_iscsi_target_scope:
+ required: False
+ choices:
+ - volume
+ - group
+ type: str
+ description:
+ - Newly created volumes are exported under iSCSI Group Target or iSCSI Volume Target.
+ default_volume_limit:
+ required: False
+ type: int
+ description:
+ - Default limit for a volume space usage as a percentage of volume size. Volume will be taken offline/made non-writable on exceeding its
+ limit. Percentage as integer from 0 to 100.
+ domain_name:
+ required: False
+ type: str
+ description:
+ - Domain name for this group. String of alphanumeric characters, valid range is from 2 to 255; Each label must be between 1 and 63 characters
+ long; - and . are allowed after the first and before the last character.
+ dns_servers:
+ required: False
+ type: list
+ elements: dict
+ description:
+ - IP addresses of this group's DNS servers.
+ fc_enabled:
+ required: False
+ type: bool
+ description:
+ - Whether FC is enabled on this group.
+ force:
+ required: False
+ type: bool
+ default: False
+ description:
+ - Can be used with the halt or merge flag. With halt, halts the remaining arrays when one or more are unreachable.
+ With merge, ignores warnings and forcibly merges the specified group with this group.
+ group_snapshot_ttl:
+ required: False
+ type: int
+ description:
+ - Snapshot time-to-live (TTL) configured at group level for automatic deletion of unmanaged snapshots. A value of 0 indicates unlimited TTL.
+ group_target_enabled:
+ required: False
+ type: bool
+ description:
+ - Whether the group target is enabled on this group.
+ group_target_name:
+ required: False
+ type: str
+ description:
+ - iSCSI target name for this group. String of up to 255 alphanumeric, hyphenated, colon, or period-separated characters,
+ but cannot begin with a hyphen, colon or period. This type is used for the group target name.
+ halt:
+ required: False
+ type: bool
+ description:
+ - Halt all arrays in the group.
+ iscsi_enabled:
+ required: False
+ type: bool
+ description:
+ - Whether iSCSI is enabled on this group.
+ isns_enabled:
+ required: False
+ type: bool
+ description:
+ - Whether iSNS is enabled.
+ isns_port:
+ required: False
+ type: int
+ description:
+ - Port number for iSNS Server. Positive integer value up to 65535 representing TCP/IP port.
+ isns_server:
+ required: False
+ type: str
+ description:
+ - Hostname or IP Address of iSNS Server.
+ level:
+ required: False
+ choices:
+ - info
+ - notice
+ - warning
+ - critical
+ type: str
+ description:
+ - Level of the test alert.
+ login_banner_after_auth:
+ required: False
+ type: bool
+ description:
+ - Whether the banner should be displayed before the user credentials are prompted or after.
+ login_banner_message:
+ required: False
+ type: str
+ description:
+ - The message for the login banner that is displayed during user login activity. String up to 2048 characters.
+ login_banner_reset:
+ required: False
+ type: str
+ description:
+ - This will reset the banner to the version of the installed NOS. When login_banner_after_auth is specified, login_banner_reset cannot be set to true.
+ merge:
+ required: False
+ type: bool
+ description:
+ - Perform group merge with the specified group.
+ migrate:
+ required: False
+ type: bool
+ description:
+ - Migrate the group Management Service to the current group Management Service backup array.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the group.
+ ntp_server:
+ required: False
+ type: str
+ description:
+ - Either IP address or hostname of the NTP server for this group. Plain string.
+ proxy_port:
+ required: False
+ type: int
+ description:
+ - Proxy Port of HTTP Proxy Server. Integer value from 0 to 65535 representing a TCP/IP port.
+ proxy_server:
+ required: False
+ type: str
+ description:
+ - Hostname or IP Address of HTTP Proxy Server. Setting this attribute to an empty string will unset all proxy settings.
+ proxy_username:
+ required: False
+ type: str
+ description:
+ - Username to authenticate with HTTP Proxy Server. HTTP proxy server username, string up to 255 characters; special
+ characters ([, ], `, ;, ampersand, tab, space, newline) are not allowed.
+ proxy_password:
+ required: False
+ type: str
+ description:
+ - Password to authenticate with HTTP Proxy Server.
+ reboot:
+ required: False
+ type: bool
+ description:
+ - Reboot all arrays in the group.
+ repl_throttle_list:
+ required: False
+ type: list
+ elements: dict
+ description:
+ - All the replication bandwidth limits on the system. All the throttles for the partner.
+ send_alert_to_support:
+ required: False
+ type: bool
+ description:
+ - Whether to send alert to Support.
+ skip_secondary_mgmt_ip:
+ required: False
+ type: bool
+ description:
+ - Skip check for secondary management IP address.
+ smtp_auth_enabled:
+ required: False
+ type: bool
+ description:
+ - Whether SMTP Server requires authentication.
+ smtp_auth_password:
+ required: False
+ type: str
+ description:
+ - Password to authenticate with SMTP Server.
+ smtp_auth_username:
+ required: False
+ type: str
+ description:
+ - Username to authenticate with SMTP Server.
+ smtp_port:
+ required: False
+ type: int
+ description:
+ - Port number of SMTP Server.
+ smtp_encrypt_type:
+ required: False
+ choices:
+ - none
+ - starttls
+ - ssl
+ type: str
+ description:
+ - Level of encryption for SMTP.
+ snmp_community:
+ required: False
+ type: str
+ description:
+ - Community string to be used with SNMP.
+ snmp_get_enabled:
+ required: False
+ type: bool
+ description:
+ - Whether to accept SNMP get commands.
+ snmp_get_port:
+ required: False
+ type: int
+ description:
+ - Port number to which SNMP get requests should be sent.
+ snmp_trap_enabled:
+ required: False
+ type: bool
+ description:
+ - Whether to enable SNMP traps.
+ snmp_trap_host:
+ required: False
+ type: str
+ description:
+ - Hostname or IP Address to send SNMP traps.
+ snmp_trap_port:
+ required: False
+ type: int
+ description:
+ - Port number of SNMP trap host.
+ snmp_sys_contact:
+ required: False
+ type: str
+ description:
+ - Name of the SNMP administrator. Plain string.
+ snmp_sys_location:
+ required: False
+ type: str
+ description:
+ - Location of the group. Plain string.
+ src_group_ip:
+ required: False
+ type: str
+ description:
+ - IP address of the source group.
+ src_group_name:
+ required: False
+ type: str
+ description:
+ - Name of the source group.
+ src_username:
+ required: False
+ type: str
+ description:
+ - Username of the source group.
+ src_passphrase:
+ required: False
+ type: str
+ description:
+ - Source group encryption passphrase. String with size from 8 to 64 printable characters.
+ src_password:
+ required: False
+ type: str
+ description:
+ - Password of the source group.
+ state:
+ required: True
+ choices:
+ - present
+ - absent
+ type: str
+ description:
+ - The group operation.
+ syslogd_enabled:
+ required: False
+ type: bool
+ description:
+ - Whether syslogd is enabled on this system.
+ syslogd_port:
+ required: False
+ type: int
+ description:
+ - Port number for syslogd server.
+ syslogd_server:
+ required: False
+ type: str
+ description:
+ - Hostname of the syslogd server.
+ tdz_enabled:
+ required: False
+ type: bool
+ description:
+ - Whether Target Driven Zoning (TDZ) is enabled on this group.
+ tdz_prefix:
+ required: False
+ type: str
+ description:
+ - Target Driven Zoning (TDZ) prefix for peer zones created by TDZ.
+ test_alert:
+ required: False
+ type: bool
+ description:
+ - Generate a test alert.
+ timezone:
+ required: False
+ type: str
+ description:
+ - Timezone in which this group is located. Plain string.
+ tlsv1_enabled:
+ required: False
+ type: bool
+ description:
+ - Enable or disable TLSv1.0 and TLSv1.1.
+ user_inactivity_timeout:
+ required: False
+ type: int
+ description:
+ - The amount of time in seconds that the user session is inactive before timing out. User inactivity timeout in seconds, valid range is from 1 to 43200.
+ validate_merge:
+ required: False
+ type: bool
+ description:
+ - Perform group merge validation.
+ vss_validation_timeout:
+ required: False
+ type: int
+ description:
+ - The amount of time in seconds to validate Microsoft VSS application synchronization before timing out. VSS validation timeout in seconds,
+ valid range is from 1 to 3600.
+ vvol_enabled:
+ required: False
+ type: bool
+ description:
+ - Whether vVols are enabled on this group.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage group
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+- name: Update group
+ hpe.nimble.hpe_nimble_group:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ send_alert_to_support: "{{ send_alert_to_support }}"
+ alert_to_email_addrs: "{{ alert_to_email_addrs }}"
+ state: "present"
+
+- name: Reboot group
+ hpe.nimble.hpe_nimble_group:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: "present"
+ reboot: true
+
+- name: Halt group
+ hpe.nimble.hpe_nimble_group:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: "present"
+ halt: true
+
+- name: Validate merge group
+ hpe.nimble.hpe_nimble_group:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ src_group_ip: "{{ src_group_ip }}"
+ src_password: "{{ src_password }}"
+ skip_secondary_mgmt_ip: "{{ skip_secondary_mgmt_ip }}"
+ src_passphrase: "{{ src_passphrase }}"
+ state: "present"
+ validate_merge: true
+
+- name: Merge group
+ hpe.nimble.hpe_nimble_group:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ src_group_ip: "{{ src_group_ip }}"
+ src_password: "{{ src_password }}"
+ skip_secondary_mgmt_ip: "{{ skip_secondary_mgmt_ip }}"
+ src_passphrase: "{{ src_passphrase }}"
+ state: "present"
+ merge: true
+
+- name: Test alert group
+ hpe.nimble.hpe_nimble_group:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ level: "{{ level }}"
+ state: "present"
+ test_alert: true
+
+- name: Migrate group
+ hpe.nimble.hpe_nimble_group:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: "present"
+ migrate: true
+
+- name: Check migrate group
+ hpe.nimble.hpe_nimble_group:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: "present"
+ check_migrate: true
+
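+# A hedged sketch (not in the original examples): renaming a group uses the
+# update path, with change_name carrying the new name; 'new_group_name' is
+# an illustrative variable, not a module option.
+- name: Rename group
+ hpe.nimble.hpe_nimble_group:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ change_name: "{{ new_group_name }}"
+ state: "present"
+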
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def update_group(
+ client_obj,
+ group_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(group_name):
+ return (False, False, "Update group failed as it is not present.", {}, {})
+
+ try:
+ group_resp = client_obj.groups.get(id=None, name=group_name)
+ if utils.is_null_or_empty(group_resp):
+ return (False, False, f"Group '{group_name}' cannot be updated as it is not present.", {}, {})
+
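+ # compute the delta between the requested kwargs and the current group
+ # attributes; only changed, non-null fields are passed to update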
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(group_resp, **kwargs)
+ if len(changed_attrs_dict) > 0:
+ group_resp = client_obj.groups.update(id=group_resp.attrs.get("id"), **params)
+ return (True, True, f"Group '{group_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, group_resp.attrs)
+ else:
+ return (True, False, f"Group '{group_resp.attrs.get('name')}' already present in given state.", {}, group_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Group update failed | '{ex}'", {}, {})
+
+
+def reboot_group(
+ client_obj,
+ group_name):
+
+ if utils.is_null_or_empty(group_name):
+ return (False, False, "Reboot group failed as it is not present.", {})
+
+ try:
+ group_resp = client_obj.groups.get(id=None, name=group_name)
+ if utils.is_null_or_empty(group_resp):
+ return (False, False, f"Group '{group_name}' cannot be rebooted as it is not present.", {})
+
+ client_obj.groups.reboot(id=group_resp.attrs.get("id"))
+ return (True, True, f"Rebooted group '{group_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Reboot group failed | '{ex}'", {})
+
+
+def halt_group(
+ client_obj,
+ group_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(group_name):
+ return (False, False, "Halt group failed as it is not present.", {})
+
+ try:
+ group_resp = client_obj.groups.get(id=None, name=group_name)
+ if utils.is_null_or_empty(group_resp):
+ return (False, False, f"Group '{group_name}' cannot be halted as it is not present.", {})
+ params = utils.remove_null_args(**kwargs)
+ client_obj.groups.halt(id=group_resp.attrs.get("id"), **params)
+ return (True, True, f"Halted group '{group_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Halt group failed | '{ex}'", {})
+
+
+def test_alert_group(
+ client_obj,
+ group_name,
+ level):
+
+ if utils.is_null_or_empty(group_name):
+ return (False, False, "Test alert for group failed as it is not present.", {})
+
+ try:
+ group_resp = client_obj.groups.get(id=None, name=group_name)
+ if utils.is_null_or_empty(group_resp):
+ return (False, False, f"Test alert for group '{group_name}' cannot be done as it is not present.", {})
+
+ client_obj.groups.test_alert(id=group_resp.attrs.get("id"), level=level)
+ return (True, True, f"Tested alert for group '{group_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Test alert for group failed | '{ex}'", {})
+
+
+def validate_merge_group(
+ client_obj,
+ group_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(group_name):
+ return (False, False, "Validate merge for group failed as it is not present.", {}, {})
+ try:
+
+ group_resp = client_obj.groups.get(id=None, name=group_name)
+ if utils.is_null_or_empty(group_resp):
+ return (False, False, f"Validate merge for group '{group_name}' cannot be done as it is not present.", {}, {})
+
+ params = utils.remove_null_args(**kwargs)
+ validate_merge_resp = client_obj.groups.validate_merge(id=group_resp.attrs.get("id"), **params)
+
+ if hasattr(validate_merge_resp, 'attrs'):
+ validate_merge_resp = validate_merge_resp.attrs
+
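+ # the SDK surfaces merge-validation problems in 'validation_error_msg';
+ # an empty value is treated as a passed precheck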
+ if utils.is_null_or_empty(validate_merge_resp.get("validation_error_msg")):
+ return (True, False, f"Validate merge operation for group '{group_name}' done successfully.", {}, validate_merge_resp)
+ else:
+ msg = validate_merge_resp.get("validation_error_msg")
+ return (False, False, f"Validate merge operation for group '{group_name}' failed with error '{msg}'", {}, validate_merge_resp)
+ except Exception as ex:
+ return (False, False, f"Validate merge for group failed | '{ex}'", {}, {})
+
+
+def merge_group(
+ client_obj,
+ group_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(group_name):
+ return (False, False, "Merge for group failed as it is not present.", {}, {})
+ try:
+ group_resp = client_obj.groups.get(id=None, name=group_name)
+ if utils.is_null_or_empty(group_resp):
+ return (False, False, f"Merge for group '{group_name}' cannot be done as it is not present.", {}, {})
+
+ params = utils.remove_null_args(**kwargs)
+ merge_resp = client_obj.groups.merge(id=group_resp.attrs.get("id"), **params)
+
+ if hasattr(merge_resp, 'attrs'):
+ merge_resp = merge_resp.attrs
+ return (True, True, f"Merged group '{group_name}' successfully.", {}, merge_resp)
+ except Exception as ex:
+ return (False, False, f"Merge for group failed | '{ex}'", {}, {})
+
+
+def check_migrate_group(
+ client_obj,
+ group_name):
+
+ if utils.is_null_or_empty(group_name):
+ return (False, False, "Check migrate for group failed as it is not present.", {})
+
+ try:
+ group_resp = client_obj.groups.get(id=None, name=group_name)
+ if utils.is_null_or_empty(group_resp):
+ return (False, False, f"Check migrate for group '{group_name}' cannot be done as it is not present.", {})
+
+ client_obj.groups.check_migrate(id=group_resp.attrs.get("id"))
+ return (True, True, f"Check migrate for group '{group_name}' done successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Check migrate for group failed | '{ex}'", {})
+
+
+def migrate_group(
+ client_obj,
+ group_name):
+
+ if utils.is_null_or_empty(group_name):
+ return (False, False, "Group migrate failed as it is not present.", {})
+
+ try:
+ group_resp = client_obj.groups.get(id=None, name=group_name)
+ if utils.is_null_or_empty(group_resp):
+ return (False, False, f"Migrate for group '{group_name}' cannot be done as it is not present.", {})
+
+ client_obj.groups.migrate(id=group_resp.attrs.get("id"))
+ return (True, True, f"Group '{group_name}' migrated successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Group migrate failed | '{ex}'", {})
+
+
+def main():
+
+ fields = {
+ "alarms": {
+ "required": False,
+ "type": "bool"
+ },
+ "alert_to_email_addrs": {
+ "required": False,
+ "type": "str"
+ },
+ "alert_from_email_addrs": {
+ "required": False,
+ "type": "str"
+ },
+ "alert_min_level": {
+ "required": False,
+ "choices": ['info',
+ 'notice',
+ 'warning',
+ 'critical'
+ ],
+ "type": "str"
+ },
+ "allow_analytics_gui": {
+ "required": False,
+ "type": "bool"
+ },
+ "allow_support_tunnel": {
+ "required": False,
+ "type": "bool"
+ },
+ "auto_switchover": {
+ "required": False,
+ "type": "bool"
+ },
+ "autoclean_unmanaged_snapshots": {
+ "required": False,
+ "type": "bool"
+ },
+ "autoclean_unmanaged_snapshots_ttl_unit": {
+ "required": False,
+ "type": "int"
+ },
+ "autosupport": {
+ "required": False,
+ "type": "bool"
+ },
+ "cc_mode": {
+ "required": False,
+ "type": "bool"
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "check_migrate": {
+ "required": False,
+ "type": "bool"
+ },
+ "date": {
+ "required": False,
+ "type": "int"
+ },
+ "default_iscsi_target_scope": {
+ "required": False,
+ "choices": ['volume',
+ 'group'
+ ],
+ "type": "str"
+ },
+ "default_volume_limit": {
+ "required": False,
+ "type": "int"
+ },
+ "domain_name": {
+ "required": False,
+ "type": "str"
+ },
+ "dns_servers": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "fc_enabled": {
+ "required": False,
+ "type": "bool"
+ },
+ "force": {
+ "required": False,
+ "type": "bool",
+ "default": False
+ },
+ "group_snapshot_ttl": {
+ "required": False,
+ "type": "int"
+ },
+ "group_target_enabled": {
+ "required": False,
+ "type": "bool"
+ },
+ "group_target_name": {
+ "required": False,
+ "type": "str"
+ },
+ "halt": {
+ "required": False,
+ "type": "bool"
+ },
+ "iscsi_enabled": {
+ "required": False,
+ "type": "bool"
+ },
+ "isns_enabled": {
+ "required": False,
+ "type": "bool"
+ },
+ "isns_port": {
+ "required": False,
+ "type": "int"
+ },
+ "isns_server": {
+ "required": False,
+ "type": "str"
+ },
+ "level": {
+ "required": False,
+ "choices": ['info',
+ 'notice',
+ 'warning',
+ 'critical'
+ ],
+ "type": "str"
+ },
+ "login_banner_after_auth": {
+ "required": False,
+ "type": "bool"
+ },
+ "login_banner_message": {
+ "required": False,
+ "type": "str"
+ },
+ "login_banner_reset": {
+ "required": False,
+ "type": "str"
+ },
+ "merge": {
+ "required": False,
+ "type": "bool"
+ },
+ "migrate": {
+ "required": False,
+ "type": "bool"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "ntp_server": {
+ "required": False,
+ "type": "str"
+ },
+ "proxy_port": {
+ "required": False,
+ "type": "int"
+ },
+ "proxy_server": {
+ "required": False,
+ "type": "str"
+ },
+ "proxy_username": {
+ "required": False,
+ "type": "str"
+ },
+ "proxy_password": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "reboot": {
+ "required": False,
+ "type": "bool"
+ },
+ "repl_throttle_list": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "send_alert_to_support": {
+ "required": False,
+ "type": "bool"
+ },
+ "skip_secondary_mgmt_ip": {
+ "required": False,
+ "type": "bool"
+ },
+ "smtp_auth_enabled": {
+ "required": False,
+ "type": "bool"
+ },
+ "smtp_auth_password": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "smtp_auth_username": {
+ "required": False,
+ "type": "str"
+ },
+ "smtp_port": {
+ "required": False,
+ "type": "int"
+ },
+ "smtp_encrypt_type": {
+ "required": False,
+ "choices": ['none',
+ 'starttls',
+ 'ssl'
+ ],
+ "type": "str"
+ },
+ "snmp_community": {
+ "required": False,
+ "type": "str"
+ },
+ "snmp_get_enabled": {
+ "required": False,
+ "type": "bool"
+ },
+ "snmp_get_port": {
+ "required": False,
+ "type": "int"
+ },
+ "snmp_trap_enabled": {
+ "required": False,
+ "type": "bool"
+ },
+ "snmp_trap_host": {
+ "required": False,
+ "type": "str"
+ },
+ "snmp_trap_port": {
+ "required": False,
+ "type": "int"
+ },
+ "snmp_sys_contact": {
+ "required": False,
+ "type": "str"
+ },
+ "snmp_sys_location": {
+ "required": False,
+ "type": "str"
+ },
+ "src_group_ip": {
+ "required": False,
+ "type": "str"
+ },
+ "src_group_name": {
+ "required": False,
+ "type": "str"
+ },
+ "src_username": {
+ "required": False,
+ "type": "str"
+ },
+ "src_passphrase": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "src_password": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "state": {
+ "required": True,
+ "choices": ['present',
+ 'absent'
+ ],
+ "type": "str"
+ },
+ "syslogd_enabled": {
+ "required": False,
+ "type": "bool"
+ },
+ "syslogd_port": {
+ "required": False,
+ "type": "int"
+ },
+ "syslogd_server": {
+ "required": False,
+ "type": "str"
+ },
+ "tdz_enabled": {
+ "required": False,
+ "type": "bool"
+ },
+ "tdz_prefix": {
+ "required": False,
+ "type": "str"
+ },
+ "test_alert": {
+ "required": False,
+ "type": "bool"
+ },
+ "timezone": {
+ "required": False,
+ "type": "str"
+ },
+ "tlsv1_enabled": {
+ "required": False,
+ "type": "bool"
+ },
+ "user_inactivity_timeout": {
+ "required": False,
+ "type": "int"
+ },
+ "validate_merge": {
+ "required": False,
+ "type": "bool"
+ },
+ "vss_validation_timeout": {
+ "required": False,
+ "type": "int"
+ },
+ "vvol_enabled": {
+ "required": False,
+ "type": "bool"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ module = AnsibleModule(argument_spec=fields)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ alarms = module.params["alarms"]
+ alert_to_email_addrs = module.params["alert_to_email_addrs"]
+ alert_from_email_addrs = module.params["alert_from_email_addrs"]
+ alert_min_level = module.params["alert_min_level"]
+ allow_analytics_gui = module.params["allow_analytics_gui"]
+ allow_support_tunnel = module.params["allow_support_tunnel"]
+ auto_switchover = module.params["auto_switchover"]
+ autoclean_unmanaged_snapshots = module.params["autoclean_unmanaged_snapshots"]
+ autoclean_unmanaged_snapshots_ttl_unit = module.params["autoclean_unmanaged_snapshots_ttl_unit"]
+ autosupport = module.params["autosupport"]
+ cc_mode = module.params["cc_mode"]
+ change_name = module.params["change_name"]
+ check_migrate = module.params["check_migrate"]
+ date = module.params["date"]
+ default_iscsi_target_scope = module.params["default_iscsi_target_scope"]
+ default_volume_limit = module.params["default_volume_limit"]
+ domain_name = module.params["domain_name"]
+ dns_servers = module.params["dns_servers"]
+ fc_enabled = module.params["fc_enabled"]
+ force = module.params["force"]
+ group_snapshot_ttl = module.params["group_snapshot_ttl"]
+ group_target_enabled = module.params["group_target_enabled"]
+ group_target_name = module.params["group_target_name"]
+ halt = module.params["halt"]
+ iscsi_enabled = module.params["iscsi_enabled"]
+ isns_enabled = module.params["isns_enabled"]
+ isns_port = module.params["isns_port"]
+ isns_server = module.params["isns_server"]
+ level = module.params["level"]
+ login_banner_after_auth = module.params["login_banner_after_auth"]
+ login_banner_message = module.params["login_banner_message"]
+ login_banner_reset = module.params["login_banner_reset"]
+ merge = module.params["merge"]
+ migrate = module.params["migrate"]
+ group_name = module.params["name"]
+ ntp_server = module.params["ntp_server"]
+ proxy_port = module.params["proxy_port"]
+ proxy_server = module.params["proxy_server"]
+ proxy_username = module.params["proxy_username"]
+ proxy_password = module.params["proxy_password"]
+ reboot = module.params["reboot"]
+ repl_throttle_list = module.params["repl_throttle_list"]
+ send_alert_to_support = module.params["send_alert_to_support"]
+ skip_secondary_mgmt_ip = module.params["skip_secondary_mgmt_ip"]
+ smtp_auth_enabled = module.params["smtp_auth_enabled"]
+ smtp_auth_password = module.params["smtp_auth_password"]
+ smtp_auth_username = module.params["smtp_auth_username"]
+ smtp_port = module.params["smtp_port"]
+ smtp_encrypt_type = module.params["smtp_encrypt_type"]
+ snmp_community = module.params["snmp_community"]
+ snmp_get_enabled = module.params["snmp_get_enabled"]
+ snmp_get_port = module.params["snmp_get_port"]
+ snmp_trap_enabled = module.params["snmp_trap_enabled"]
+ snmp_trap_host = module.params["snmp_trap_host"]
+ snmp_trap_port = module.params["snmp_trap_port"]
+ snmp_sys_contact = module.params["snmp_sys_contact"]
+ snmp_sys_location = module.params["snmp_sys_location"]
+ src_group_ip = module.params["src_group_ip"]
+ src_group_name = module.params["src_group_name"]
+ src_username = module.params["src_username"]
+ src_passphrase = module.params["src_passphrase"]
+ src_password = module.params["src_password"]
+ state = module.params["state"]
+ syslogd_enabled = module.params["syslogd_enabled"]
+ syslogd_port = module.params["syslogd_port"]
+ syslogd_server = module.params["syslogd_server"]
+ tdz_enabled = module.params["tdz_enabled"]
+ tdz_prefix = module.params["tdz_prefix"]
+ test_alert = module.params["test_alert"]
+ timezone = module.params["timezone"]
+ tlsv1_enabled = module.params["tlsv1_enabled"]
+ user_inactivity_timeout = module.params["user_inactivity_timeout"]
+ validate_merge = module.params["validate_merge"]
+ vss_validation_timeout = module.params["vss_validation_timeout"]
+ vvol_enabled = module.params["vvol_enabled"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "present":
+ if reboot is True:
+ return_status, changed, msg, changed_attrs_dict = reboot_group(client_obj, group_name)
+
+ elif halt is True:
+ return_status, changed, msg, changed_attrs_dict = halt_group(client_obj, group_name, force=force)
+
+ elif test_alert is True:
+ return_status, changed, msg, changed_attrs_dict = test_alert_group(client_obj, group_name, level)
+
+ elif validate_merge is True:
+ return_status, changed, msg, changed_attrs_dict, resp = validate_merge_group(
+ client_obj,
+ group_name,
+ src_group_ip=src_group_ip,
+ src_group_name=src_group_name,
+ src_password=src_password,
+ src_username=src_username,
+ skip_secondary_mgmt_ip=skip_secondary_mgmt_ip,
+ src_passphrase=src_passphrase)
+
+ elif merge is True:
+ return_status, changed, msg, changed_attrs_dict, resp = merge_group(
+ client_obj,
+ group_name,
+ src_group_ip=src_group_ip,
+ src_group_name=src_group_name,
+ src_password=src_password,
+ src_username=src_username,
+ force=force,
+ skip_secondary_mgmt_ip=skip_secondary_mgmt_ip,
+ src_passphrase=src_passphrase)
+
+ elif check_migrate is True:
+ return_status, changed, msg, changed_attrs_dict = check_migrate_group(client_obj, group_name)
+
+ elif migrate is True:
+ return_status, changed, msg, changed_attrs_dict = migrate_group(client_obj, group_name)
+
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_group(
+ client_obj,
+ group_name,
+ name=change_name,
+ alarms=alarms,
+ alert_to_email_addrs=alert_to_email_addrs,
+ alert_from_email_addrs=alert_from_email_addrs,
+ alert_min_level=alert_min_level,
+ allow_analytics_gui=allow_analytics_gui,
+ allow_support_tunnel=allow_support_tunnel,
+ auto_switchover=auto_switchover,
+ autoclean_unmanaged_snapshots=autoclean_unmanaged_snapshots,
+ autoclean_unmanaged_snapshots_ttl_unit=autoclean_unmanaged_snapshots_ttl_unit,
+ autosupport=autosupport,
+ cc_mode=cc_mode,
+ date=date,
+ default_iscsi_target_scope=default_iscsi_target_scope,
+ default_volume_limit=default_volume_limit,
+ domain_name=domain_name,
+ dns_servers=dns_servers,
+ fc_enabled=fc_enabled,
+ group_snapshot_ttl=group_snapshot_ttl,
+ group_target_enabled=group_target_enabled,
+ group_target_name=group_target_name,
+ iscsi_enabled=iscsi_enabled,
+ isns_enabled=isns_enabled,
+ isns_port=isns_port,
+ isns_server=isns_server,
+ login_banner_after_auth=login_banner_after_auth,
+ login_banner_message=login_banner_message,
+ login_banner_reset=login_banner_reset,
+ ntp_server=ntp_server,
+ proxy_port=proxy_port,
+ proxy_password=proxy_password,
+ proxy_server=proxy_server,
+ proxy_username=proxy_username,
+ repl_throttle_list=repl_throttle_list,
+ send_alert_to_support=send_alert_to_support,
+ smtp_auth_enabled=smtp_auth_enabled,
+ smtp_auth_password=smtp_auth_password,
+ smtp_auth_username=smtp_auth_username,
+ smtp_port=smtp_port,
+ smtp_encrypt_type=smtp_encrypt_type,
+ snmp_community=snmp_community,
+ snmp_get_enabled=snmp_get_enabled,
+ snmp_get_port=snmp_get_port,
+ snmp_trap_enabled=snmp_trap_enabled,
+ snmp_trap_host=snmp_trap_host,
+ snmp_trap_port=snmp_trap_port,
+ snmp_sys_contact=snmp_sys_contact,
+ snmp_sys_location=snmp_sys_location,
+ syslogd_enabled=syslogd_enabled,
+ syslogd_port=syslogd_port,
+ syslogd_server=syslogd_server,
+ tdz_enabled=tdz_enabled,
+ tdz_prefix=tdz_prefix,
+ timezone=timezone,
+ tlsv1_enabled=tlsv1_enabled,
+ user_inactivity_timeout=user_inactivity_timeout,
+ vss_validation_timeout=vss_validation_timeout,
+ vvol_enabled=vvol_enabled)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = reboot_group(client_obj, group_name)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ if utils.is_null_or_empty(resp):
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_info.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_info.py
new file mode 100644
index 00000000..51fd94b3
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_info.py
@@ -0,0 +1,1026 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description:
+ - Collect information from an HPE Nimble Storage array. By default, the module will collect basic information
+ including array, groups config, protection templates, protection schedules, snapshots, snapshot collections, volume
+ collections and volume counts. Additional information can be collected based on the configured set of arguments.
+module: hpe_nimble_info
+options:
+ gather_subset:
+ required: False
+ default: minimum
+ type: list
+ elements: raw
+ description:
+ - When supplied, this argument will define the information to be collected. Possible values for this include "all", "minimum", "config",
+ "access_control_records", "alarms", "application_servers", "application_categories", "arrays", "chap_users", "controllers", "disks",
+ "fibre_channel_interfaces", "fibre_channel_configs", "fibre_channel_initiator_aliases", "fibre_channel_ports", "folders", "groups",
+ "initiator_groups", "initiators", "master_key", "network_configs", "performance_policies", "pools", "protection_schedules",
+ "protection_templates", "protocol_endpoints", "replication_partners", "shelves", "snapshots", "snapshot_collections", "software_versions",
+ "user_groups", "user_policies", "users", "volumes", "volume_collections".
+
+ - Each subset except "all", "minimum" and "config" supports four types of subset options. Subset "all" supports limit and detail as subset options.
+ Subset "config" and "minimum" does not support any subset options.
+
+ - See the example section for usage of the following subset options.
+ - fields - A string representing which attributes to display for a given subset.
+ - limit - An integer value which represents how many of the latest items to show for a given subset.
+ - detail - A bool flag; when set to true, it fetches everything for a given subset. Default is "True".
+ - query - A key-value pair to query.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Collect information from HPE Nimble Storage array
+version_added: "1.0.0"
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+- name: Collect default set of information
+ hpe.nimble.hpe_nimble_info:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ gather_subset:
+ - minimum:
+ register: array_info
+
+- name: Show default information
+ ansible.builtin.debug:
+ msg: "{{ array_info['nimble_info']['default'] }}"
+
+- name: Collect config
+ hpe.nimble.hpe_nimble_info:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ gather_subset:
+ - config:
+ register: array_info
+
+- name: Show config information
+ ansible.builtin.debug:
+ msg: "{{ array_info['nimble_info']['config'] }}"
+
+- name: Collect all
+ hpe.nimble.hpe_nimble_info:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ gather_subset:
+ - all:
+ limit: 1
+ register: array_info
+
+- name: Show all information
+ ansible.builtin.debug:
+ msg: "{{ array_info['nimble_info'] }}"
+
+- name: Collect volume, snapshot and volume collection. The query below will show just one
+ snapshot detail with attributes 'name and id' for a volume called 'vol1'
+ hpe.nimble.hpe_nimble_info:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ gather_subset:
+ - volumes:
+ fields: "name,id"
+ limit: 2
+ - volume_collections:
+ limit: 1
+ detail: false
+ - snapshots:
+ fields: "name,id"
+ query:
+ vol_name: "vol1"
+ limit: 1
+ detail: True
+ register: array_info
+
+- name: Show information
+ ansible.builtin.debug:
+ msg: "{{ array_info['nimble_info'] }}"
+
+'''
+RETURN = r'''
+nimble_info:
+ description: Returns the information collected from the HPE Nimble Storage array
+ returned: always
+ type: complex
+ contains: {}
+ sample: {
+ "config": {
+ "arrays": [
+ {
+ "all_flash": false,
+ "extended_model": "vmware-4G-5T-160F",
+ "full_name": "ansibler1-va",
+ "role": "leader",
+ "serial": "ansibler1-va"
+ }
+ ],
+ "groups": [
+ {
+ "alarms_enabled": true,
+ "auto_switchover_enabled": true,
+ "auto_switchover_messages": [],
+ "autosupport_enabled": true,
+ "default_iscsi_target_scope": "group",
+ "dns_servers": [
+ {
+ "ip_addr": "10.235.0.185"
+ },
+ {
+ "ip_addr": "10.1.255.254"
+ }
+ ],
+ "domain_name": "vlab.nimblestorage.com",
+ "encryption_config": {
+ "cipher": "aes_256_xts",
+ "encryption_active": true,
+ "encryption_key_manager": "local",
+ "master_key_set": true,
+ "mode": "available",
+ "scope": "group"
+ },
+ "failover_mode": "Manual",
+ "fc_enabled": false,
+ "iscsi_enabled": true,
+ "isns_enabled": true,
+ "leader_array_name": "ansibler1-va",
+ "member_list": [
+ "ansibler1-va"
+ ],
+ "name": "group-ansibler1-va",
+ "ntp_server": "time.nimblestorage.com",
+ "send_alert_to_support": true,
+ "smtp_auth_enabled": false,
+ "smtp_auth_username": "",
+ "smtp_port": 25,
+ "smtp_server": "",
+ "snmp_community": "public",
+ "snmp_trap_enabled": false,
+ "snmp_trap_host": "",
+ "snmp_trap_port": 162,
+ "syslogd_enabled": false,
+ "syslogd_server": "",
+ "vvol_enabled": true
+ }
+ ],
+ "network_configs": [
+ {
+ "active_since": 1592210265,
+ "array_list": [
+ {
+ "ctrlr_a_support_ip": "10.18.1.1",
+ "ctrlr_b_support_ip": "10.18.2.2",
+ "member_gid": 1,
+ "name": "ansibler1-va",
+ "nic_list": [
+ {
+ "data_ip": "172.16.41.139",
+ "name": "eth3",
+ "subnet_label": "data1",
+ "tagged": false
+ },
+ {
+ "data_ip": "172.16.234.76",
+ "name": "eth4",
+ "subnet_label": "data2",
+ "tagged": false
+ },
+ {
+ "data_ip": "",
+ "name": "eth2",
+ "subnet_label": "mgmt-data",
+ "tagged": false
+ },
+ {
+ "data_ip": "",
+ "name": "eth1",
+ "subnet_label": "mgmt-data",
+ "tagged": false
+ }
+ ]
+ }
+ ],
+ "creation_time": 1586411318,
+ "group_leader_array": "ansibler1-va",
+ "id": "177321e77f009f2013000000000000000000000001",
+ "iscsi_automatic_connection_method": true,
+ "iscsi_connection_rebalancing": true,
+ "last_active": 1592210256,
+ "last_modified": 1586411356,
+ "mgmt_ip": "10.18.171.96",
+ "name": "active",
+ "role": "active",
+ "route_list": [
+ {
+ "gateway": "10.18.160.1",
+ "tgt_netmask": "0.0.0.0",
+ "tgt_network": "0.0.0.0"
+ }
+ ],
+ "secondary_mgmt_ip": "",
+ "subnet_list": [
+ {
+ "allow_group": true,
+ "allow_iscsi": true,
+ "discovery_ip": "172.16.41.140",
+ "failover": true,
+ "failover_enable_time": 0,
+ "label": "data1",
+ "mtu": 1500,
+ "netmask": "255.255.224.0",
+ "network": "172.16.32.0",
+ "netzone_type": "single",
+ "type": "data",
+ "vlan_id": 0
+ },
+ {
+ "allow_group": true,
+ "allow_iscsi": true,
+ "discovery_ip": "172.16.234.101",
+ "failover": true,
+ "failover_enable_time": 0,
+ "label": "data2",
+ "mtu": 1500,
+ "netmask": "255.255.224.0",
+ "network": "172.16.224.0",
+ "netzone_type": "single",
+ "type": "data",
+ "vlan_id": 0
+ },
+ {
+ "allow_group": false,
+ "allow_iscsi": false,
+ "discovery_ip": "",
+ "failover": true,
+ "failover_enable_time": 0,
+ "label": "mgmt-data",
+ "mtu": 1500,
+ "netmask": "255.255.224.0",
+ "network": "10.18.160.0",
+ "netzone_type": "none",
+ "type": "mgmt",
+ "vlan_id": 0
+ }
+ ]
+ },
+ {
+ "active_since": 0,
+ "array_list": [
+ {
+ "ctrlr_a_support_ip": "10.18.1.1",
+ "ctrlr_b_support_ip": "10.18.2.2",
+ "member_gid": 1,
+ "name": "ansibler1-va",
+ "nic_list": [
+ {
+ "data_ip": "",
+ "name": "eth2",
+ "subnet_label": "mgmt-data",
+ "tagged": false
+ },
+ {
+ "data_ip": "",
+ "name": "eth1",
+ "subnet_label": "mgmt-data",
+ "tagged": false
+ },
+ {
+ "data_ip": "172.16.41.139",
+ "name": "eth3",
+ "subnet_label": "data1",
+ "tagged": false
+ },
+ {
+ "data_ip": "172.16.234.76",
+ "name": "eth4",
+ "subnet_label": "data2",
+ "tagged": false
+ }
+ ]
+ }
+ ],
+ "creation_time": 1586411356,
+ "group_leader_array": "ansibler1-va",
+ "id": "177321e77f009f2013000000000000000000000002",
+ "iscsi_automatic_connection_method": true,
+ "iscsi_connection_rebalancing": true,
+ "last_active": 1592210265,
+ "last_modified": 1586411318,
+ "mgmt_ip": "10.18.171.96",
+ "name": "backup",
+ "role": "backup",
+ "route_list": [
+ {
+ "gateway": "10.18.160.1",
+ "tgt_netmask": "0.0.0.0",
+ "tgt_network": "0.0.0.0"
+ }
+ ],
+ "secondary_mgmt_ip": "",
+ "subnet_list": [
+ {
+ "allow_group": false,
+ "allow_iscsi": false,
+ "discovery_ip": "",
+ "failover": true,
+ "failover_enable_time": 0,
+ "label": "mgmt-data",
+ "mtu": 1500,
+ "netmask": "255.255.224.0",
+ "network": "10.18.160.0",
+ "netzone_type": "none",
+ "type": "mgmt",
+ "vlan_id": 0
+ },
+ {
+ "allow_group": true,
+ "allow_iscsi": true,
+ "discovery_ip": "172.16.41.140",
+ "failover": true,
+ "failover_enable_time": 0,
+ "label": "data1",
+ "mtu": 1500,
+ "netmask": "255.255.224.0",
+ "network": "172.16.32.0",
+ "netzone_type": "single",
+ "type": "data",
+ "vlan_id": 0
+ },
+ {
+ "allow_group": true,
+ "allow_iscsi": true,
+ "discovery_ip": "172.16.234.101",
+ "failover": true,
+ "failover_enable_time": 0,
+ "label": "data2",
+ "mtu": 1500,
+ "netmask": "255.255.224.0",
+ "network": "172.16.224.0",
+ "netzone_type": "single",
+ "type": "data",
+ "vlan_id": 0
+ }
+ ]
+ }
+ ],
+ "pools": [
+ {
+ "array_count": 1,
+ "dedupe_all_volumes": false,
+ "dedupe_capable": false,
+ "is_default": true,
+ "name": "default",
+ "vol_list": [
+ {
+ "id": "0675a5e21cc205c609000000000000000000000001",
+ "name": "vol1",
+ "vol_id": "0675a5e21cc205c609000000000000000000000001",
+ "vol_name": "vol1"
+ },
+ {
+ "id": "067321e77f009f2013000000000000000000000271",
+ "name": "volumetc-vol1-0-24-07-2020-71470d6d-cd6e-11ea-9165-00505696c568",
+ "vol_id": "067321e77f009f2013000000000000000000000271",
+ "vol_name": "volumetc-vol1-0-24-07-2020-71470d6d-cd6e-11ea-9165-00505696c568"
+ },
+ {
+ "id": "067321e77f009f201300000000000000000000024d",
+ "name": "ansible-vol1",
+ "vol_id": "067321e77f009f201300000000000000000000024d",
+ "vol_name": "ansible-vol1"
+ }
+ ]
+ }
+ ]
+ },
+ "default": {
+ "arrays": [
+ {
+ "all_flash": false,
+ "extended_model": "vmware-4G-5T-160F",
+ "full_name": "ansibler1-va"
+ }
+ ],
+ "disks": 16,
+ "folders": 0,
+ "groups": [
+ {
+ "auto_switchover_messages": [],
+ "default_iscsi_target_scope": "group",
+ "encryption_config": {
+ "cipher": "aes_256_xts",
+ "encryption_active": true,
+ "encryption_key_manager": "local",
+ "master_key_set": true,
+ "mode": "available",
+ "scope": "group"
+ },
+ "fc_enabled": false,
+ "iscsi_enabled": true,
+ "leader_array_name": "ansibler1-va",
+ "name": "group-ansibler1-va",
+ "num_snaps": 49
+ }
+ ],
+ "initiator_groups": 1,
+ "protection_schedules": 6,
+ "protection_templates": 3,
+ "protocol_endpoints": 0,
+ "snapshot_collections": 49,
+ "snapshots": 49,
+ "software_versions": "5.2.2.0-730069-opt",
+ "users": 2,
+ "volume_collections": 1,
+ "volumes": 3
+ },
+ "snapshots":
+ [
+ {
+ "access_control_records": null,
+ "agent_type": "none",
+ "app_uuid": "",
+ "creation_time": 1586429663,
+ "description": "Replicated by protection policy volcoll2 schedule Schedule-new",
+ "expiry_after": 1,
+ "expiry_time": 0,
+ "id": "0475a5e21cc205c609000000000000000200000004",
+ "is_manually_managed": true,
+ "is_replica": true,
+ "is_unmanaged": false,
+ "last_modified": 1586429956,
+ "metadata": null,
+ "name": "adfsasfasfasf",
+ "new_data_compressed_bytes": 0,
+ "new_data_uncompressed_bytes": 0,
+ "new_data_valid": true,
+ "offline_reason": "user",
+ "online": false,
+ "origin_name": "",
+ "pool_name": "default",
+ "replication_status": null,
+ "schedule_id": "0c7321e77f009f2013000000000000000000000008",
+ "schedule_name": "Schedule-new",
+ "serial_number": "022e0240e677ef2f6c9ce9006cc7be73",
+ "size": 1073741824,
+ "snap_collection_id": "0575a5e21cc205c609000000000000000000000004",
+ "snap_collection_name": "adfsasfasfasf",
+ "target_name": "iqn.2007-11.com.nimblestorage:group-ansibler1-va-g7321e77f009f2013",
+ "vol_id": "0675a5e21cc205c609000000000000000000000001",
+ "vol_name": "vol1",
+ "vpd_ieee0": "022e0240e677ef2f",
+ "vpd_ieee1": "6c9ce9006cc7be73",
+ "vpd_t10": "Nimble 022e0240e677ef2f6c9ce9006cc7be73",
+ "writable": false
+ }
+ ],
+ "volume_collections":
+ [
+ "volcoll2": {
+ "id": "077321e77f009f2013000000000000000000000005",
+ "name": "volcoll2"
+ }
+ ],
+ "volumes":
+ [
+ "10.18.180.239-ansible-vol1": {
+ "id": "067321e77f009f2013000000000000000000000230",
+ "name": "10.18.180.239-ansible-vol1"
+ },
+ "changed-volname": {
+ "id": "067321e77f009f201300000000000000000000022f",
+ "name": "changed-volname"
+ }
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+import re
+
+limit_not_supported = [
+ "controllers",
+ "disks",
+ "shelves",
+ "software_versions"
+]
+
+
+def add_to_valid_subset_list(valid_subset_list,
+ subset_name,
+ subset_options,
+ fetch_all=False):
+
+ if valid_subset_list is None:
+ return []
+ valid_subset = {}
+ fields = query = limit = None
+ detail = True # default
+ count = -1
+
+ if subset_options is not None:
+ if 'fields' in subset_options and subset_options['fields'] is not None:
+ temp = ""
+ for item in subset_options['fields']:
+ temp += item + ','
+ fields = temp.strip(',')
+ if 'detail' in subset_options and subset_options['detail'] is not None:
+ detail = subset_options['detail']
+ if 'limit' in subset_options:
+ count = limit = subset_options['limit']
+ if fetch_all is True:
+            # a few subsets do not support the limit option; hence, for the 'all' subset, set it to None for those
+ if subset_name in limit_not_supported:
+ limit = None
+
+ if subset_options is not None and 'query' in subset_options:
+ query = subset_options['query']
+
+ valid_subset['name'] = subset_name.lower()
+ valid_subset['fields'] = fields
+ valid_subset['query'] = query
+ valid_subset['limit'] = limit
+ valid_subset['detail'] = detail
+ valid_subset['count'] = count
+ valid_subset_list.append(dict(valid_subset))
+ return valid_subset_list
+
+
+def is_subset_option_valid(subset_options):
+ if subset_options is None:
+ return (True, "", "")
+ if isinstance(subset_options, dict) is False:
+ raise Exception("Subset options should be provided as dictionary.")
+ for key, value in subset_options.items():
+ if key != "fields" and key != "query" and key != "limit" and key != "detail":
+ return (False, key, "Valid subset option names are:'fields', 'query', 'limit', and 'detail'")
+ if key == 'limit' and type(value) is not int:
+ return (False, key, "Subset options 'limit' should be provided as integer.")
+ if key == 'detail' and type(value) is not bool:
+ return (False, key, "Subset options 'detail' should be provided as bool.")
+ if key == 'fields' and type(value) is not list:
+ return (False, key, "Subset options 'fields' should be provided as list.")
+ if key == 'query' and type(value) is not dict:
+ return (False, key, "Subset options 'query' should be provided as dict.")
+ return (True, "", "")
+
+
+def is_subset_already_added(key, valid_subset_list):
+ if valid_subset_list is None:
+ return False
+ for item in valid_subset_list:
+ if key == item['name']:
+ return True
+ return False
+
+
+def handle_all_subset(info_subset, valid_subset_list, subset_options):
+
+ if valid_subset_list is None or info_subset is None:
+ return []
+ msg = "Subset options 'fields and query' cannot be used with 'all' subset. Only 'limit and detail' option can be used."
+
+ if subset_options is not None:
+ if 'fields' in subset_options or 'query' in subset_options:
+ raise Exception(msg)
+
+ for key, value in info_subset.items():
+ if (is_subset_already_added(key, valid_subset_list) is False
+ and (key != 'minimum' and key != 'config' and key != 'snapshots')):
+ add_to_valid_subset_list(valid_subset_list, key, subset_options, True)
+ return valid_subset_list
+
+
+def raise_invalid_subset_ex(key):
+ msg = f"Subset name '{key}' is not valid. Please provide a correct subset name."
+ raise Exception(msg)
+
+
+def raise_repeat_subset_ex(key):
+ msg = f"Subset '{key}' is already provided as input. Please remove one entry."
+ raise Exception(msg)
+
+
+def raise_subset_mutually_exclusive_ex():
+ msg = "Subset 'all' and 'minimum' are mutually exclusive. Please provide only one of them"
+ raise Exception(msg)
+
+
+def parse_subset_list(info_subset, gather_subset):
+ valid_subset_list = []
+ try:
+ if gather_subset is None or isinstance(gather_subset, list) is False:
+ add_to_valid_subset_list(valid_subset_list, 'minimum', None)
+ return valid_subset_list
+        # each entry in gather_subset represents either a dictionary or a string for each object set
+ for object_set in gather_subset:
+ object_set_type = type(object_set)
+
+ if object_set_type is dict:
+ for key, subset_options in object_set.items():
+ key = key.strip()
+ if info_subset.get(key, None) is None:
+ raise_invalid_subset_ex(key)
+ flag, param_key, err_msg = is_subset_option_valid(subset_options)
+
+ if flag is False:
+ msg = f"Invalid subset option '{param_key}' provided for subset '{key}'."
+ raise Exception(msg + ' ' + err_msg)
+ else:
+ if key == 'all':
+ if is_subset_already_added('minimum', valid_subset_list) is True:
+ raise_subset_mutually_exclusive_ex()
+ handle_all_subset(info_subset, valid_subset_list, subset_options)
+ continue
+ if key == 'minimum' or key == 'config':
+ if subset_options is not None:
+ raise Exception("Subset options cannot be used with 'minimum' and 'config' subset.")
+ if key == 'minimum':
+ if is_subset_already_added('all', valid_subset_list) is True:
+ raise_subset_mutually_exclusive_ex()
+ elif is_subset_already_added(key, valid_subset_list) is True:
+ raise_repeat_subset_ex(key)
+ add_to_valid_subset_list(valid_subset_list, key, subset_options)
+ elif object_set_type is str:
+ key = object_set.strip()
+ if info_subset.get(key, None) is None:
+ raise_invalid_subset_ex(key)
+
+ if is_subset_already_added(key, valid_subset_list) is True:
+ raise_repeat_subset_ex(key)
+
+ if key == 'all':
+ if is_subset_already_added('minimum', valid_subset_list) is True:
+ raise_subset_mutually_exclusive_ex()
+ handle_all_subset(info_subset, valid_subset_list, None)
+ continue
+
+ add_to_valid_subset_list(valid_subset_list, key, None)
+        return valid_subset_list
+    except Exception:
+        raise
+
+
+def generate_dict(name, resp):
+ temp_dict = {}
+ if utils.is_null_or_empty(resp) or name is None:
+ return {}
+ for item in resp:
+ key = item.attrs.get(name)
+ if key in temp_dict:
+ # we need to convert the dict into a list of items as we have more than one item for the same key
+ temp_list = [temp_dict[key]]
+ if isinstance(temp_dict[key], dict) is True:
+ temp_dict.pop(key)
+ temp_dict.setdefault(key, temp_list).append(item.attrs)
+ elif key is None or key == "N/A":
+ temp_dict.setdefault(name, []).append(item.attrs)
+ else:
+ temp_dict[key] = item.attrs
+ return temp_dict
+
+
+def fetch_config_subset(info_subset):
+ if info_subset is None:
+ return ({}, True)
+ toreturn = {'config': {}}
+ result = {}
+ temp_dict = {}
+ grp_fields = """
+ smtp_server,
+ smtp_port,
+ smtp_auth_enabled,
+ smtp_auth_username,
+ autosupport_enabled,
+ send_alert_to_support,
+ isns_enabled,
+ snmp_trap_enabled,
+ snmp_trap_host,
+ snmp_trap_port,
+ snmp_community,
+ domain_name,
+ dns_servers,
+ ntp_server,
+ syslogd_enabled,
+ syslogd_server,
+ vvol_enabled,
+ alarms_enabled,
+ member_list,
+ encryption_config,
+ name,
+ fc_enabled,
+ iscsi_enabled
+
+ """
+ try:
+ for key, cl_obj in info_subset.items():
+ if key == 'arrays':
+ resp = cl_obj.list(detail=True, fields="extended_model,full_name,all_flash,serial,role")
+ elif key == 'groups':
+ resp = cl_obj.list(detail=True, fields=re.sub('\\s+', '', grp_fields))
+ elif key == 'pools':
+ resp = cl_obj.list(detail=True, fields="array_count,dedupe_all_volumes,dedupe_capable,is_default,name,vol_list")
+ elif key == 'network_configs':
+ resp = cl_obj.list(detail=True)
+ else:
+ continue
+ temp_dict[key] = resp
+ # prepare
+ result['arrays'] = generate_dict('arrays', temp_dict['arrays'])['arrays']
+ result['groups'] = generate_dict('groups', temp_dict['groups'])['groups']
+ result['pools'] = generate_dict('pools', temp_dict['pools'])['pools']
+ result['network_configs'] = generate_dict('network_configs', temp_dict['network_configs'])['network_configs']
+ toreturn['config'] = result
+ return (toreturn, True)
+ except Exception:
+ raise
+
+
+def fetch_minimum_subset(info_subset):
+
+ if info_subset is None:
+ return ({}, True)
+ minimum_subset = [
+ "arrays",
+ "disks",
+ "folders",
+ "groups",
+ "initiator_groups",
+ "performance_policies",
+ "pools",
+ "protection_schedules",
+ "protection_templates",
+ "protocol_endpoints",
+ "snapshot_collections",
+ "software_versions",
+ "users",
+ "volumes",
+ "volume_collections"
+ ]
+ toreturn = {'default': {}}
+ result = {}
+ temp_dict = {}
+
+ try:
+ for key in minimum_subset:
+ cl_obj = info_subset[key]
+ if key == 'arrays':
+ resp = cl_obj.list(detail=True, fields="extended_model,full_name,all_flash")
+ elif key == 'groups':
+ # certain fields were only added in NimOS 5.1 and above
+ if utils.is_array_version_above_or_equal(info_subset['arrays'], "5.1"):
+ resp = cl_obj.list(detail=True,
+ fields="encryption_config,name,fc_enabled,iscsi_enabled,leader_array_name,default_iscsi_target_scope,num_snaps")
+ else:
+ resp = cl_obj.list(detail=True, fields="name")
+ else:
+ resp = cl_obj.list(detail=False)
+ temp_dict[key] = resp
+ # prepare
+ result['volumes'] = len(temp_dict['volumes'])
+ result['volume_collections'] = len(temp_dict['volume_collections'])
+ result['users'] = len(temp_dict['users'])
+ result['software_versions'] = temp_dict['software_versions'][-1].attrs.get('version') # get the latest
+ result['snapshot_collections'] = len(temp_dict['snapshot_collections'])
+ result['snapshots'] = temp_dict['groups'][-1].attrs.get('num_snaps')
+ result['protocol_endpoints'] = len(temp_dict['protocol_endpoints'])
+ result['protection_templates'] = len(temp_dict['protection_templates'])
+ result['protection_schedules'] = len(temp_dict['protection_schedules'])
+ result['initiator_groups'] = len(temp_dict['initiator_groups'])
+ result['folders'] = len(temp_dict['folders'])
+ result['disks'] = len(temp_dict['disks'])
+ result['arrays'] = generate_dict('arrays', temp_dict['arrays'])['arrays']
+ result['groups'] = generate_dict('groups', temp_dict['groups'])['groups']
+ toreturn['default'] = result
+ return (toreturn, True)
+ except Exception as ex:
+ result['failed'] = str(ex)
+ toreturn['default'] = result
+ return (toreturn, False)
+
+# The snapshots endpoint needs a vol_name/vol_id as a mandatory param. Hence, for the 'all' subset,
+# where the user cannot provide a query option, we need to fetch the snapshots by iterating
+# over the list of volumes and checking whether those volumes have snapshots.
+
+
+def fetch_snapshots_for_all_subset(subset, client_obj):
+ if subset is None or client_obj is None:
+ return {}
+ result = {}
+ total_snap = []
+ # get the volume list
+ vol_list_resp = client_obj.volumes.list(detail=False)
+ if vol_list_resp is not None and vol_list_resp.__len__() > 0:
+ for vol_item in vol_list_resp:
+ vol_name = vol_item.attrs.get('name')
+ snap_list = client_obj.snapshots.list(detail=subset['detail'], vol_name=vol_name, limit=subset['limit'])
+ if snap_list is not None and snap_list.__len__() > 0:
+ total_snap.extend(snap_list)
+ if subset['limit'] is not None and total_snap.__len__() >= subset['limit']:
+ total_snap = total_snap[0:subset['limit']]
+ break
+ if total_snap.__len__() > 0:
+ result['snapshots'] = generate_dict('snapshots', total_snap)['snapshots']
+ return result
+
+
+def fetch_subset(valid_subset_list, info_subset):
+ if valid_subset_list is None or isinstance(valid_subset_list, list) is False:
+ return {}
+ try:
+ result_dict = {}
+ resp = None
+ for subset in valid_subset_list:
+ result = {}
+ try:
+ if subset['name'] == "minimum":
+ result, flag = fetch_minimum_subset(info_subset)
+ if flag is False:
+ raise Exception(result)
+ elif subset['name'] == "config":
+ result, flag = fetch_config_subset(info_subset)
+ if flag is False:
+ raise Exception(result)
+ elif subset['name'] == "all":
+ result = fetch_snapshots_for_all_subset(subset, info_subset['all'])
+ for key, value in result.items():
+ result_dict[key] = value
+ continue
+ else:
+                # if the subset is user_policies, make sure the NimOS version is 5.1.0 (Fiji) or above
+ if subset['name'] == 'user_policies' and utils.is_array_version_above_or_equal(info_subset['arrays'], "5.1.0") is False:
+ continue
+ cl_obj_set = info_subset[subset['name']]
+ query = subset['query']
+ if query is not None:
+ resp = cl_obj_set.list(detail=subset['detail'], **query, fields=subset['fields'], limit=subset['limit'])
+ else:
+ resp = cl_obj_set.list(detail=subset['detail'], fields=subset['fields'], limit=subset['limit'])
+ if resp is not None and resp.__len__() != 0:
+                    # limit is not supported for a few subsets; for those, slice the result to keep the number of items asked for by the user.
+ if subset['count'] != -1 and resp.__len__() > subset['count']:
+ resp = resp[: subset['count']]
+
+ result[subset['name']] = generate_dict('data', resp)['data']
+ else:
+ result[subset['name']] = resp
+ for key, value in result.items():
+ result_dict[key] = value
+ except Exception as ex:
+ msg = f"Failed to fetch {subset['name']} details. Error:'{str(ex)}'"
+ raise Exception(msg) from ex
+ return result_dict
+ except Exception:
+ raise
+
+
+def initialize_info_subset(client_obj):
+
+ info_subset = {
+ "all": client_obj,
+ "minimum": client_obj,
+ "config": client_obj,
+ "access_control_records": client_obj.access_control_records,
+ "alarms": client_obj.alarms,
+ "application_servers": client_obj.application_servers,
+ "application_categories": client_obj.application_categories,
+ "arrays": client_obj.arrays,
+ "chap_users": client_obj.chap_users,
+ "controllers": client_obj.controllers,
+ "disks": client_obj.disks,
+ "fibre_channel_interfaces": client_obj.fibre_channel_interfaces,
+ "fibre_channel_configs": client_obj.fibre_channel_configs,
+ "fibre_channel_initiator_aliases": client_obj.fibre_channel_initiator_aliases,
+ "fibre_channel_ports": client_obj.fibre_channel_ports,
+ "folders": client_obj.folders,
+ "groups": client_obj.groups,
+ "initiator_groups": client_obj.initiator_groups,
+ "initiators": client_obj.initiators,
+ "master_key": client_obj.master_key,
+ "network_configs": client_obj.network_configs,
+ "network_interfaces": client_obj.network_interfaces,
+ "performance_policies": client_obj.performance_policies,
+ "pools": client_obj.pools,
+ "protection_schedules": client_obj.protection_schedules,
+ "protection_templates": client_obj.protection_templates,
+ "protocol_endpoints": client_obj.protocol_endpoints,
+ "replication_partners": client_obj.replication_partners,
+ "shelves": client_obj.shelves,
+ "snapshots": client_obj.snapshots,
+ "snapshot_collections": client_obj.snapshot_collections,
+ "software_versions": client_obj.software_versions,
+ "user_groups": client_obj.user_groups,
+ "user_policies": client_obj.user_policies,
+ "users": client_obj.users,
+ "volumes": client_obj.volumes,
+ "volume_collections": client_obj.volume_collections
+ }
+ return info_subset
+
+
+def get_subset_info(
+ client_obj,
+ gather_subset):
+
+ if utils.is_null_or_empty(gather_subset):
+ return (False, False, "Please provide atleast one subset.", {})
+ result_dict = []
+ try:
+        info_subset = initialize_info_subset(client_obj)
+ valid_subset_list = parse_subset_list(info_subset, gather_subset)
+ if valid_subset_list is not None and valid_subset_list.__len__() > 0:
+ # we got subset list to work on. get the details of these subset
+ result_dict = fetch_subset(valid_subset_list, info_subset)
+ return (True, False, "Fetched the subset details.", result_dict)
+ else:
+ return (True, False, "No vaild subset provided.", result_dict)
+ except Exception as ex:
+ return (False, False, f"{ex}", {})
+
+
+def main():
+
+ fields = {
+ "gather_subset": {
+ "required": False,
+ "type": "list",
+ "elements": 'raw',
+ 'default': "minimum"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ gather_subset = module.params["gather_subset"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ return_status, changed, msg, result_dict = get_subset_info(client_obj, gather_subset)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(result_dict) is False and result_dict.__len__() > 0:
+ module.exit_json(return_status=return_status,
+ changed=changed,
+ message=msg,
+ nimble_info=result_dict)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_initiator_group.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_initiator_group.py
new file mode 100644
index 00000000..1a2c9c58
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_initiator_group.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the HPE Nimble Storage initiator groups.
+module: hpe_nimble_initiator_group
+options:
+ access_protocol:
+ choices:
+ - iscsi
+ - fc
+ required: False
+ type: str
+ description:
+ - Initiator group access protocol.
+ app_uuid:
+ required: False
+ type: str
+ description:
+      - Application identifier of the initiator group. String of up to 255 characters; alphanumeric characters, hyphen, colon, dot and underscore are allowed.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change name of the existing initiator group.
+ description:
+ required: False
+ type: str
+ description:
+ - Text description of initiator group.
+ fc_initiators:
+ required: False
+ type: list
+ elements: dict
+ description:
+      - List of FC initiators. When creating or updating fc_initiators, WWPN is required.
+ fc_tdz_ports:
+ required: False
+ type: list
+ elements: int
+ description:
+ - List of target fibre channel ports with target driven zoning configured on this initiator group.
+ host_type:
+ required: False
+ type: str
+ description:
+ - Initiator group host type. Available options are auto and hpux. The default option is auto. This attribute will be
+ applied to all the initiators in the initiator group. Initiators with different host OSes should not be kept in the
+ same initiator group having a non-default host type attribute.
+ iscsi_initiators:
+ required: False
+ type: list
+ elements: dict
+ description:
+      - List of iSCSI initiators. When creating or updating iscsi_initiators, either iqn or ip_address is always required, along with label.
+ metadata:
+ required: False
+ type: dict
+ description:
+ - Key-value pairs that augment an initiator group's attributes. List of key-value pairs. Keys must be unique and non-empty.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the initiator group.
+ state:
+ required: True
+ choices:
+ - present
+ - absent
+ - create
+ type: str
+ description:
+ - The initiator group operation.
+ target_subnets:
+ required: False
+ type: list
+ elements: dict
+ description:
+ - List of target subnet labels. If specified, discovery and access to volumes will be restricted to the specified subnets.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage initiator groups
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create ig. Fails if already present.
+# if state is present, then create ig if not present. Succeeds if it already exists.
+- name: Create an igroup
+ hpe.nimble.hpe_nimble_initiator_group:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ access_protocol: "{{ access_protocol | default('iscsi')}}"
+ name: "{{ name }}"
+    iscsi_initiators: "{{ iscsi_initiators | default([])}}" # list of dictionaries. Each entry holds the details of one initiator.
+ description: "{{ description | default(None) }}"
+ state: "{{ state | default('present') }}"
+
+- name: Delete igroup
+ hpe.nimble.hpe_nimble_initiator_group:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ access_protocol: "{{ access_protocol | default('iscsi')}}"
+ name: "{{ name }}"
+ state: absent
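+
+# A hedged sketch of passing initiator details inline. Per the option docs above,
+# each iscsi_initiators entry needs a label plus either an iqn or an ip_address;
+# the label and iqn values here are illustrative only.
+- name: Create an igroup with an inline iSCSI initiator entry
+  hpe.nimble.hpe_nimble_initiator_group:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    access_protocol: "iscsi"
+    name: "{{ name }}"
+    iscsi_initiators:
+      - label: "host1-port1"
+        iqn: "iqn.1998-01.com.example:host1"
+    state: present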
+
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_igroup(
+ client_obj,
+ initiator_group_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(initiator_group_name):
+ return (False, False, "Initiator group creation failed. Initiator group name is null.", {}, {})
+
+ try:
+ ig_resp = client_obj.initiator_groups.get(id=None, name=initiator_group_name)
+ if utils.is_null_or_empty(ig_resp):
+ # remove unchanged and null arguments from kwargs
+ params = utils.remove_null_args(**kwargs)
+ ig_resp = client_obj.initiator_groups.create(name=initiator_group_name, **params)
+ return (True, True, f"Created initiator Group '{initiator_group_name}' successfully.", {}, ig_resp.attrs)
+ else:
+ return (False, False, f"Cannot create initiator Group '{initiator_group_name}' as it is already present in given state.", {}, {})
+
+ except Exception as ex:
+ return (False, False, f"Initiator group creation failed | {ex}", {}, {})
+
+
+def update_igroup(
+ client_obj,
+ ig_resp,
+ **kwargs):
+
+ if utils.is_null_or_empty(ig_resp):
+ return (False, False, "Update initiator group failed as it is not present.", {}, {})
+ try:
+ ig_name = ig_resp.attrs.get("name")
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(ig_resp, **kwargs)
+ if changed_attrs_dict.__len__() > 0:
+ ig_resp = client_obj.initiator_groups.update(id=ig_resp.attrs.get("id"), **params)
+ return (True, True, f"Initiator group '{ig_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, ig_resp.attrs)
+ else:
+ return (True, False, f"Initiator group '{ig_name}' already present in given state.", {}, ig_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Initiator group update failed | {ex}", {}, {})
+
+
+def delete_igroup(
+ client_obj,
+ initiator_group_name):
+
+ if utils.is_null_or_empty(initiator_group_name):
+ return (False, False, "Initiator group deletion failed as it is not present.", {})
+
+ try:
+ # see if the igroup is already present
+ ig_resp = client_obj.initiator_groups.get(id=None, name=initiator_group_name)
+        if ig_resp is not None:
+            client_obj.initiator_groups.delete(ig_resp.attrs.get("id"))
+            return (True, True, f"Successfully deleted initiator group '{initiator_group_name}'.", {})
+        else:
+            return (False, False, f"Initiator group '{initiator_group_name}' is not present on array.", {})
+ except Exception as ex:
+ return (False, False, f"Initiator group deletion failed | {ex}", {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['present',
+ 'absent',
+ 'create'
+ ],
+ "type": "str"
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "access_protocol": {
+ "choices": ['iscsi',
+ 'fc'
+ ],
+ "required": False,
+ "type": "str"
+ },
+ "host_type": {
+ "required": False,
+ "type": "str"
+ },
+ "fc_tdz_ports": {
+ "required": False,
+ "type": "list",
+ "elements": 'int'
+ },
+ "target_subnets": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "iscsi_initiators": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "fc_initiators": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "app_uuid": {
+ "required": False,
+ "type": "str"
+ },
+ "metadata": {
+ "required": False,
+ "type": "dict"
+ }
+ }
+
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ required_if = [('state', 'create', ['access_protocol'])]
+ module = AnsibleModule(argument_spec=fields, required_if=required_if)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ initiator_group_name = module.params["name"]
+ change_name = module.params["change_name"]
+ description = module.params["description"]
+ access_protocol = module.params["access_protocol"]
+ host_type = module.params["host_type"]
+ fc_tdz_ports = module.params["fc_tdz_ports"]
+ target_subnets = module.params["target_subnets"]
+ iscsi_initiators = module.params["iscsi_initiators"]
+ fc_initiators = module.params["fc_initiators"]
+ app_uuid = module.params["app_uuid"]
+ metadata = module.params["metadata"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "create" or state == "present":
+ ig_resp = client_obj.initiator_groups.get(id=None, name=initiator_group_name)
+ if utils.is_null_or_empty(ig_resp) or state == "create":
+
+ return_status, changed, msg, changed_attrs_dict, resp = create_igroup(
+ client_obj,
+ initiator_group_name,
+ description=description,
+ access_protocol=access_protocol,
+ host_type=host_type,
+ fc_tdz_ports=fc_tdz_ports,
+ target_subnets=target_subnets,
+ iscsi_initiators=iscsi_initiators,
+ fc_initiators=fc_initiators,
+ app_uuid=app_uuid,
+ metadata=metadata)
+ else:
+ return_status, changed, msg, changed_attrs_dict, resp = update_igroup(
+ client_obj,
+ ig_resp,
+ name=change_name,
+ description=description,
+ host_type=host_type,
+ fc_tdz_ports=fc_tdz_ports,
+ target_subnets=target_subnets,
+ iscsi_initiators=iscsi_initiators,
+ fc_initiators=fc_initiators,
+ metadata=metadata)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_igroup(client_obj, initiator_group_name)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_network.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_network.py
new file mode 100644
index 00000000..93c8d3e9
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_network.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the storage network configuration on the HPE Nimble Storage group.
+module: hpe_nimble_network
+options:
+ activate:
+ required: False
+ type: bool
+ description:
+ - Activate a network configuration.
+ array:
+ required: False
+ type: list
+ elements: dict
+ description:
+ - List of array network configs.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change name of the existing network config.
+ iscsi_automatic_connection_method:
+ required: False
+ type: bool
+ description:
+      - Whether automatic connection method is enabled. Enabling this means redirecting connections from the specified iSCSI
+ discovery IP address to the best data IP address based on connection counts.
+ iscsi_connection_rebalancing:
+ required: False
+ type: bool
+ description:
+ - Whether rebalancing is enabled. Enabling this means rebalancing iSCSI connections by periodically breaking existing
+ connections that are out-of-balance, allowing the host to reconnect to a more appropriate data IP address.
+ ignore_validation_mask:
+ required: False
+ type: int
+ description:
+ - Indicates whether to ignore the validation.
+ mgmt_ip:
+ required: False
+ type: str
+ description:
+      - Management IP address for the Group. Four numbers in the range [0,255] separated by periods.
+ name:
+ required: True
+ type: str
+ choices:
+ - active
+ - backup
+ - draft
+ description:
+ - Name of the network configuration. Use the name 'draft' when creating a draft configuration.
+ secondary_mgmt_ip:
+ required: False
+ type: str
+ description:
+ - Secondary management IP address for the Group. Four numbers in the range [0,255] separated by periods.
+ subnet:
+ required: False
+ type: list
+ elements: dict
+ description:
+ - List of subnet configs.
+ route:
+ required: False
+ type: list
+ elements: dict
+ description:
+ - List of static routes.
+ state:
+ required: True
+ choices:
+ - create
+ - present
+ - absent
+ type: str
+ description:
+ - The network config operation.
+ validate:
+ required: False
+ type: bool
+ description:
+ - Validate a network configuration.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage network configuration
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create the network config; fails if it already exists or cannot be created
+# if state is present, then create the network config if not present, else succeed
+- name: Create network config
+ hpe.nimble.hpe_nimble_network:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ route: "{{ route }}"
+ subnet: "{{ subnet }}"
+ array: "{{ array }}"
+ iscsi_automatic_connection_method: true
+    iscsi_connection_rebalancing: false
+ mgmt_ip: "{{ mgmt_ip }}"
+ state: "{{ state | default('present') }}"
+
+- name: Delete network config
+ hpe.nimble.hpe_nimble_network:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: "absent"
+
+- name: Validate network config
+ hpe.nimble.hpe_nimble_network:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: "present"
+ ignore_validation_mask: 1
+ validate: true
+
+- name: Activate Network config
+ hpe.nimble.hpe_nimble_network:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: "present"
+ ignore_validation_mask: 1
+ activate: true
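+
+# A hedged sketch of the inline list structures. The route and subnet field names
+# below mirror the route_list/subnet_list entries reported by hpe_nimble_info;
+# the addresses are illustrative only.
+- name: Create a draft network config with inline route and subnet lists
+  hpe.nimble.hpe_nimble_network:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "draft"
+    route:
+      - gateway: "10.18.160.1"
+        tgt_network: "0.0.0.0"
+        tgt_netmask: "0.0.0.0"
+    subnet:
+      - label: "data1"
+        network: "172.16.32.0"
+        netmask: "255.255.224.0"
+    array: "{{ array }}"
+    iscsi_automatic_connection_method: true
+    iscsi_connection_rebalancing: false
+    mgmt_ip: "{{ mgmt_ip }}"
+    state: "present"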
+
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_update_network_config(
+ client_obj,
+ name,
+ state,
+ iscsi_automatic_connection_method,
+ iscsi_connection_rebalancing,
+ mgmt_ip,
+ change_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(name):
+ return (False, False, "Create network config failed as name is not present.", {}, {})
+
+ try:
+ network_resp = client_obj.network_configs.get(id=None, name=name)
+ if utils.is_null_or_empty(network_resp):
+ params = utils.remove_null_args(**kwargs)
+ network_resp = client_obj.network_configs.create(name=name,
+ iscsi_automatic_connection_method=iscsi_automatic_connection_method,
+ iscsi_connection_rebalancing=iscsi_connection_rebalancing,
+ mgmt_ip=mgmt_ip,
+ **params)
+ return (True, True, f"Network config '{name}' created successfully.", {}, network_resp.attrs)
+ else:
+ if state == "create":
+ return (False, False, f"Network config '{name}' cannot be created as it is already present in given state.", {}, network_resp.attrs)
+
+ # update case
+ kwargs['name'] = change_name
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(network_resp, **kwargs)
+            # even though some of the attributes may not have changed, they still have to be passed in case of an update.
+ params = utils.remove_null_args(**kwargs)
+ if changed_attrs_dict.__len__() > 0:
+ network_resp = client_obj.network_configs.update(id=network_resp.attrs.get("id"),
+ name=name,
+ iscsi_automatic_connection_method=iscsi_automatic_connection_method,
+ iscsi_connection_rebalancing=iscsi_connection_rebalancing,
+ mgmt_ip=mgmt_ip,
+ **params)
+ return (True, True, f"Network config '{name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, network_resp.attrs)
+ else:
+ return (True, False, f"Network config '{network_resp.attrs.get('name')}' already present in given state.", {}, network_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Network config creation failed |'{ex}'", {}, {})
+
+
+def delete_network_config(
+ client_obj,
+ name):
+
+ if utils.is_null_or_empty(name):
+ return (False, False, "Delete network config failed as name is not present.", {})
+
+ try:
+ network_resp = client_obj.network_configs.get(id=None, name=name)
+ if utils.is_null_or_empty(network_resp):
+ return (False, False, f"Network config '{name}' cannot be deleted as it is not present.", {})
+
+ client_obj.network_configs.delete(id=network_resp.attrs.get("id"))
+ return (True, True, f"Deleted network config '{name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Delete network config failed |'{ex}'", {})
+
+
+def validate_network_config(
+ client_obj,
+ name,
+ ignore_validation_mask):
+
+ if utils.is_null_or_empty(name):
+ return (False, False, "Validate network config failed as name is not present.", {})
+
+ try:
+ network_resp = client_obj.network_configs.get(id=None, name=name)
+ if utils.is_null_or_empty(network_resp):
+ return (False, False, f"Network config '{name}' cannot be validated as it is not present.", {})
+
+ client_obj.network_configs.validate_netconfig(
+ id=network_resp.attrs.get("id"),
+ ignore_validation_mask=ignore_validation_mask)
+
+ return (True, False, f"Validated network config '{name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Validate Network config failed |'{ex}'", {})
+
+
+def activate_network_config(
+ client_obj,
+ name,
+ ignore_validation_mask):
+
+ if utils.is_null_or_empty(name):
+ return (False, False, "Activate network config failed as name is not present.", {})
+
+ try:
+ network_resp = client_obj.network_configs.get(id=None, name=name)
+ if utils.is_null_or_empty(network_resp):
+ return (False, False, f"Network config '{name}' cannot be activated as it is not present.", {})
+
+ client_obj.network_configs.activate_netconfig(id=network_resp.attrs.get("id"),
+ ignore_validation_mask=ignore_validation_mask)
+
+ return (True, True, f"Activated network config '{name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Activate Network config failed |'{ex}'", {})
+
+
+def main():
+
+ fields = {
+ "activate": {
+ "required": False,
+ "type": "bool"
+ },
+ "array": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "iscsi_automatic_connection_method": {
+ "required": False,
+ "type": "bool"
+ },
+ "iscsi_connection_rebalancing": {
+ "required": False,
+ "type": "bool"
+ },
+ "ignore_validation_mask": {
+ "required": False,
+ "type": "int"
+ },
+ "mgmt_ip": {
+ "required": False,
+ "type": "str"
+ },
+ "name": {
+ "required": True,
+ "choices": ['active',
+ 'backup',
+ 'draft'
+ ],
+ "type": "str"
+ },
+ "secondary_mgmt_ip": {
+ "required": False,
+ "type": "str"
+ },
+ "subnet": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "route": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "state": {
+ "required": True,
+ "choices": ['create',
+ 'present',
+ 'absent'
+ ],
+ "type": "str"
+ },
+ "validate": {
+ "required": False,
+ "type": "bool"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ required_if = [('state', 'create', ['array', 'iscsi_automatic_connection_method', 'iscsi_connection_rebalancing', 'mgmt_ip', 'subnet', 'route'])]
+ module = AnsibleModule(argument_spec=fields, required_if=required_if)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ activate = module.params["activate"]
+ array = module.params["array"]
+ iscsi_automatic_connection_method = module.params["iscsi_automatic_connection_method"]
+ iscsi_connection_rebalancing = module.params["iscsi_connection_rebalancing"]
+ ignore_validation_mask = module.params["ignore_validation_mask"]
+ mgmt_ip = module.params["mgmt_ip"]
+ name = module.params["name"]
+ change_name = module.params["change_name"]
+ secondary_mgmt_ip = module.params["secondary_mgmt_ip"]
+ subnet = module.params["subnet"]
+ route = module.params["route"]
+ state = module.params["state"]
+ validate = module.params["validate"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if ((validate is None or validate is False)
+ and (activate is None or activate is False)
+ and (state == "create" or state == "present")):
+ return_status, changed, msg, changed_attrs_dict, resp = create_update_network_config(
+ client_obj,
+ name,
+ state,
+ iscsi_automatic_connection_method,
+ iscsi_connection_rebalancing,
+ mgmt_ip,
+ change_name,
+ array_list=array,
+ ignore_validation_mask=ignore_validation_mask,
+ secondary_mgmt_ip=secondary_mgmt_ip,
+ subnet_list=subnet,
+ route_list=route)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_network_config(client_obj, name)
+
+ elif state == "present" and validate is True:
+ return_status, changed, msg, changed_attrs_dict = validate_network_config(client_obj, name, ignore_validation_mask)
+
+ elif state == "present" and activate is True:
+ return_status, changed, msg, changed_attrs_dict = activate_network_config(client_obj, name, ignore_validation_mask)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_partner.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_partner.py
new file mode 100644
index 00000000..6266f473
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_partner.py
@@ -0,0 +1,511 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the replication partner on an HPE Nimble Storage group.
+module: hpe_nimble_partner
+options:
+ control_port:
+ required: False
+ type: int
+ description:
+ - Port number of partner control interface. Value -1 for an invalid port or a positive integer value up to 65535 representing the TCP/IP port.
+ data_port:
+ required: False
+ type: int
+ description:
+ - Port number of partner data interface. Value -1 for an invalid port or a positive integer value up to 65535 representing the TCP/IP port.
+ description:
+ required: False
+ type: str
+ description:
+ - Description of replication partner.
+ downstream_hostname:
+ required: True
+ type: str
+ description:
+ - IP address or hostname of partner interface. This must be the partner's Group Management IP address.
+      String of up to 64 characters; alphanumeric characters, '-', '.' and ':' are allowed after the first character.
+ folder:
+ required: False
+ type: str
+ description:
+ - The Folder ID within the pool where volumes replicated from this partner will be created. This is not supported for pool partners.
+ match_folder:
+ required: False
+ type: bool
+ description:
+ - Indicates whether to match the upstream volume's folder on the downstream.
+ name:
+ required: False
+ type: str
+ description:
+      - Name of the replication partner. String of up to 64 characters; alphanumeric characters, '-', '.' and ':' are allowed after the first character.
+ pause:
+ required: False
+ type: bool
+ description:
+ - Pause replication for the specified partner.
+ pool:
+ required: False
+ type: str
+ description:
+ - The pool name where volumes replicated from this partner will be created. Replica volumes created as clones ignore
+ this parameter and are always created in the same pool as their parent volume.
+ repl_data_hostname:
+ required: False
+ type: str
+ description:
+      - IP address or hostname of the partner data interface. String of up to 64 characters; alphanumeric characters, '-', '.' and ':' are allowed after the first character.
+ resume:
+ required: False
+ type: bool
+ description:
+ - Resume replication for the specified partner.
+ secret:
+ required: False
+ type: str
+ description:
+ - Replication partner shared secret, used for mutual authentication of the partners.
+ state:
+ required: True
+ choices:
+ - create
+ - present
+ - absent
+ type: str
+ description:
+ - The replication partner operation.
+ subnet_label:
+ required: False
+ type: str
+ description:
+      - Label of the subnet used to replicate to this partner.
+ subnet_type:
+ required: False
+ choices:
+ - invalid
+ - unconfigured
+ - mgmt
+ - data
+ - mgmt_data
+ type: str
+ description:
+ - Type of the subnet used to replicate to this partner.
+ test:
+ required: False
+ type: bool
+ description:
+ - Test connectivity to the specified partner.
+ throttles:
+ required: False
+ type: list
+ elements: dict
+ description:
+ - Throttles used while replicating from/to this partner. All the throttles for the partner.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage Replication Partner
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create the partner; fails if it already exists or cannot be created
+# if state is present, then create the partner if not present, else succeed
+- name: Create Partner
+ hpe.nimble.hpe_nimble_partner:
+ host: "{{ host }}" # upstream host
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name | mandatory }}"
+ description: "{{ description }}"
+ downstream_hostname: "{{ downstream_hostname | mandatory }}"
+ secret: "{{ secret | mandatory }}"
+ subnet_label: "{{ subnet_label | mandatory }}"
+ state: "{{ state | default('present') }}"
+
+- name: Delete Partner
+ hpe.nimble.hpe_nimble_partner:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ downstream_hostname: "{{ downstream_hostname | mandatory }}"
+ state: "absent"
+
+- name: Test Partner
+ hpe.nimble.hpe_nimble_partner:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ downstream_hostname: "{{ downstream_hostname | mandatory }}"
+ state: "present"
+ test: true
+
+- name: Pause Partner
+ hpe.nimble.hpe_nimble_partner:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ downstream_hostname: "{{ downstream_hostname | mandatory }}"
+ state: "present"
+ pause: true
+
+- name: Resume Partner
+ hpe.nimble.hpe_nimble_partner:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ downstream_hostname: "{{ downstream_hostname | mandatory }}"
+ state: "present"
+ resume: true
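+
+# A hedged sketch of placing replica volumes from this partner in a specific pool.
+# 'pool' and 'match_folder' are documented options above; the pool name is illustrative.
+- name: Create Partner with replica volumes placed in a specific pool
+  hpe.nimble.hpe_nimble_partner:
+    host: "{{ host }}" # upstream host
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name | mandatory }}"
+    downstream_hostname: "{{ downstream_hostname | mandatory }}"
+    secret: "{{ secret | mandatory }}"
+    subnet_label: "{{ subnet_label | mandatory }}"
+    pool: "default"
+    match_folder: true
+    state: "present"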
+
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_partner(
+ client_obj,
+ downstream_hostname, # downstream
+ **kwargs):
+
+ if utils.is_null_or_empty(downstream_hostname):
+ return (False, False, "Create replication partner failed as name is not present.", {})
+
+ try:
+ upstream_repl_resp = client_obj.replication_partners.get(id=None, hostname=downstream_hostname)
+ if utils.is_null_or_empty(upstream_repl_resp):
+ params = utils.remove_null_args(**kwargs)
+ upstream_repl_resp = client_obj.replication_partners.create(hostname=downstream_hostname, **params)
+ return (True, True, f"Replication partner '{downstream_hostname}' created successfully.", {}, upstream_repl_resp.attrs)
+ else:
+ return (False, False, f"Replication partner '{downstream_hostname}' cannot be created as it is already present in given state.",
+ {}, upstream_repl_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Replication partner creation failed |{ex}", {}, {})
+
+
+def update_partner(
+ client_obj,
+ downstream_hostname, # downstream
+ secret,
+ **kwargs):
+
+ if utils.is_null_or_empty(downstream_hostname):
+ return (False, False, "Update replication partner failed as no downstream partner is provided.", {}, {})
+
+ try:
+ upstream_repl_resp = client_obj.replication_partners.get(id=None, hostname=downstream_hostname)
+ if utils.is_null_or_empty(upstream_repl_resp):
+ return (False, False, f"Replication partner '{downstream_hostname}' cannot be updated as it is not present.", {}, {})
+
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(upstream_repl_resp, **kwargs)
+ if changed_attrs_dict.__len__() > 0:
+ upstream_repl_resp = client_obj.replication_partners.update(id=upstream_repl_resp.attrs.get("id"), secret=secret, **params)
+ return (True, True, f"Replication partner '{downstream_hostname}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, upstream_repl_resp.attrs)
+ else:
+ return (True, False, f"Replication partner '{upstream_repl_resp.attrs.get('name')}' already present in given state.", {}, upstream_repl_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Replication partner update failed |{ex}", {}, {})
+
+
+def delete_partner(
+ client_obj,
+ downstream_hostname):
+
+ if utils.is_null_or_empty(downstream_hostname):
+ return (False, False, "Delete replication partner failed as no downstream partner is provided.", {})
+
+ try:
+ upstream_repl_resp = client_obj.replication_partners.get(id=None, hostname=downstream_hostname)
+ if utils.is_null_or_empty(upstream_repl_resp):
+ return (False, False, f"Replication partner '{downstream_hostname}' cannot be deleted as it is not present.", {})
+ client_obj.replication_partners.delete(id=upstream_repl_resp.attrs.get("id"))
+
+ return (True, True, f"Deleted replication partner '{downstream_hostname}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Delete replication partner failed |{ex}", {})
+
+
+def test_partner(
+ client_obj,
+ downstream_hostname):
+
+ if utils.is_null_or_empty(downstream_hostname):
+ return (False, False, "Test replication partner failed as no downstream partner is provided.", {})
+
+ try:
+ upstream_repl_resp = client_obj.replication_partners.get(id=None, hostname=downstream_hostname)
+ if utils.is_null_or_empty(upstream_repl_resp):
+ return (False, False, f"Replication partner '{downstream_hostname}' cannot be tested as it is not present.", {})
+
+ client_obj.replication_partners.test(id=upstream_repl_resp.attrs.get("id"))
+ return (True, False, f"Tested replication partner '{downstream_hostname}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Test replication partner failed |{ex}", {})
+
+
+def pause_partner(
+ client_obj,
+ downstream_hostname):
+
+ if utils.is_null_or_empty(downstream_hostname):
+ return (False, False, "Pause replication partner failed as no downstream partner is provided.", {})
+
+ try:
+ upstream_repl_resp = client_obj.replication_partners.get(id=None, hostname=downstream_hostname)
+ if utils.is_null_or_empty(upstream_repl_resp):
+ return (False, False, f"Replication partner '{downstream_hostname}' cannot be paused as it is not present.", {})
+ if upstream_repl_resp.attrs.get("paused") is False:
+ client_obj.replication_partners.pause(id=upstream_repl_resp.attrs.get("id"))
+ return (True, True, f"Paused replication partner '{downstream_hostname}' successfully.", {})
+ else:
+ return (True, False, f"Replication partner '{downstream_hostname}' is already in paused state.", {})
+ except Exception as ex:
+ return (False, False, f"Pause replication partner failed |{ex}", {})
+
+
+def resume_partner(
+ client_obj,
+ downstream_hostname):
+
+ if utils.is_null_or_empty(downstream_hostname):
+ return (False, False, "Resume replication partner failed as no downstream partner is provided.", {})
+
+ try:
+ upstream_repl_resp = client_obj.replication_partners.get(id=None, hostname=downstream_hostname)
+ if utils.is_null_or_empty(upstream_repl_resp):
+ return (False, False, f"Replication partner '{downstream_hostname}' cannot be resumed as it is not present.", {})
+
+ client_obj.replication_partners.resume(id=upstream_repl_resp.attrs.get("id"))
+ return (True, True, f"Resumed replication partner '{downstream_hostname}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Resume replication partner failed |{ex}", {})
+
+
+def main():
+
+ fields = {
+ "control_port": {
+ "required": False,
+ "type": "int"
+ },
+ "data_port": {
+ "required": False,
+ "type": "int"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "folder": {
+ "required": False,
+ "type": "str"
+ },
+ "match_folder": {
+ "required": False,
+ "type": "bool"
+ },
+ "name": {
+ "required": False,
+ "type": "str"
+ },
+ "downstream_hostname": {
+ "required": True,
+ "type": "str"
+ },
+ "pause": {
+ "required": False,
+ "type": "bool"
+ },
+ "pool": {
+ "required": False,
+ "type": "str"
+ },
+ "repl_data_hostname": {
+ "required": False,
+ "type": "str"
+ },
+ "resume": {
+ "required": False,
+ "type": "bool"
+ },
+ "secret": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "subnet_label": {
+ "required": False,
+ "type": "str"
+ },
+ "subnet_type": {
+ "required": False,
+ "choices": ['invalid',
+ 'unconfigured',
+ 'mgmt',
+ 'data',
+ 'mgmt_data'
+ ],
+ "type": "str"
+ },
+ "test": {
+ "required": False,
+ "type": "bool"
+ },
+ "throttles": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "state": {
+ "required": True,
+ "choices": ['create',
+ 'present',
+ 'absent'
+ ],
+ "type": "str"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ required_if = [('state', 'create', ['subnet_label', 'secret', 'downstream_hostname', 'name'])]
+
+ module = AnsibleModule(argument_spec=fields, required_if=required_if)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ control_port = module.params["control_port"]
+ data_port = module.params["data_port"]
+ description = module.params["description"]
+ folder = module.params["folder"]
+ match_folder = module.params["match_folder"]
+ repl_partner_name = module.params["name"]
+ downstream_hostname = module.params["downstream_hostname"]
+ pause = module.params["pause"]
+ pool = module.params["pool"]
+ repl_data_hostname = module.params["repl_data_hostname"]
+ resume = module.params["resume"]
+ secret = module.params["secret"]
+ subnet_label = module.params["subnet_label"]
+ subnet_type = module.params["subnet_type"]
+ test = module.params["test"]
+ throttles = module.params["throttles"]
+ state = module.params["state"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+            msg="Missing variables: hostname, username and password are mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
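+        # test, pause and resume (each used with state 'present') take precedence;
+        # otherwise state 'create'/'present' creates the partner or updates it in place.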
+ if ((test is None or test is False)
+ and (resume is None or resume is False)
+ and (pause is None or pause is False)
+ and (state == "create" or state == "present")):
+ if not client_obj.replication_partners.get(id=None, hostname=downstream_hostname) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_partner(
+ client_obj,
+ downstream_hostname,
+ control_port=control_port,
+ data_port=data_port,
+ description=description,
+ folder_id=utils.get_folder_id(client_obj, folder),
+ match_folder=match_folder,
+ name=repl_partner_name, # downstream partner name
+ pool_id=utils.get_pool_id(client_obj, pool),
+ repl_hostname=repl_data_hostname,
+ secret=secret,
+ subnet_label=subnet_label,
+ subnet_type=subnet_type,
+ throttles=throttles)
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_partner(
+ client_obj,
+ downstream_hostname,
+ secret,
+ control_port=control_port,
+ data_port=data_port,
+ description=description,
+ folder_id=utils.get_folder_id(client_obj, folder),
+ match_folder=match_folder,
+ name=repl_partner_name, # downstream partner name
+ pool_id=utils.get_pool_id(client_obj, pool),
+ repl_hostname=repl_data_hostname,
+ subnet_label=subnet_label,
+ subnet_type=subnet_type,
+ throttles=throttles)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_partner(client_obj, downstream_hostname)
+
+ elif state == "present" and test is True:
+ return_status, changed, msg, changed_attrs_dict = test_partner(client_obj, downstream_hostname)
+
+ elif state == "present" and pause is True:
+ return_status, changed, msg, changed_attrs_dict = pause_partner(client_obj, downstream_hostname)
+
+ elif state == "present" and resume is True:
+ return_status, changed, msg, changed_attrs_dict = resume_partner(client_obj, downstream_hostname)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_performance_policy.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_performance_policy.py
new file mode 100644
index 00000000..91c836ce
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_performance_policy.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the performance policies on an HPE Nimble Storage group.
+module: hpe_nimble_performance_policy
+options:
+ app_category:
+ required: False
+ type: str
+ description:
+ - Specifies the application category of the associated volume.
+ block_size:
+ required: False
+ type: int
+ description:
+    - Block size in bytes to be used by the volumes created with this specific performance policy. Supported block sizes are
+      4096 bytes (4 KB), 8192 bytes (8 KB), 16384 bytes (16 KB), and 32768 bytes (32 KB). The block size of a performance policy cannot
+      be changed once the performance policy is created.
+ cache:
+ required: False
+ type: bool
+ description:
+ - Flag denoting if data in the associated volume should be cached.
+ cache_policy:
+ required: False
+ choices:
+ - disabled
+ - normal
+ - aggressive
+ - no_write
+ - aggressive_read_no_write
+ type: str
+ description:
+ - Specifies how data of associated volume should be cached. Normal policy caches data but skips in certain conditions such as
+ sequential I/O. Aggressive policy will accelerate caching of all data belonging to this volume, regardless of sequentiality.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change name of the existing performance policy.
+ compress:
+ required: False
+ type: bool
+ description:
+ - Flag denoting if data in the associated volume should be compressed.
+ description:
+ required: False
+ type: str
+ description:
+ - Description of a performance policy.
+ dedupe:
+ type: bool
+ description:
+ - Specifies if dedupe is enabled for volumes created with this performance policy.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the performance policy.
+ space_policy:
+ required: False
+ choices:
+ - invalid
+ - offline
+ - non_writable
+ - read_only
+ - login_only
+ type: str
+ description:
+ - Specifies the state of the volume upon space constraint violation such as volume limit violation or volumes above their volume reserve,
+ if the pool free space is exhausted. Supports two policies, 'offline' and 'non_writable'.
+ state:
+ required: True
+ choices:
+ - present
+ - absent
+ - create
+ type: str
+ description:
+ - The performance policy operation.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage performance policies
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create a performance policy if not present. Fails if already present.
+# if state is present, then create a performance policy if not present. Succeed if it already exists.
+- name: Create performance policy if not present
+ hpe.nimble.hpe_nimble_performance_policy:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: "{{ state | default('present') }}"
+ name: "{{ name }}"
+ description: "{{ description }}"
+ block_size: "{{ block_size }}"
+ compress: "{{ compress }}"
+
+- name: Delete performance policy
+ hpe.nimble.hpe_nimble_performance_policy:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: absent
+
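+# A minimal illustrative update: with state 'present' and an existing policy,
+# change_name renames it; other changed attributes are modified in place.
+- name: Rename performance policy
+  hpe.nimble.hpe_nimble_performance_policy:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    change_name: "{{ change_name }}"
+    state: present
+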
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_perf_policy(
+ client_obj,
+ perf_policy_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(perf_policy_name):
+ return (False, False, "Create performance policy failed. Performance policy name is not present.", {}, {})
+
+ try:
+ perf_policy_resp = client_obj.performance_policies.get(id=None, name=perf_policy_name)
+ if utils.is_null_or_empty(perf_policy_resp):
+ params = utils.remove_null_args(**kwargs)
+ perf_policy_resp = client_obj.performance_policies.create(name=perf_policy_name,
+ **params)
+ if perf_policy_resp is not None:
+ return (True, True, f"Created performance policy '{perf_policy_name}' successfully.", {}, perf_policy_resp.attrs)
+ else:
+            return (False, False, f"Cannot create performance policy '{perf_policy_name}' as it is already present.", {}, {})
+ except Exception as ex:
+ return (False, False, f"Performance policy creation failed | {ex}", {}, {})
+
+
+def update_perf_policy(
+ client_obj,
+ perf_policy_resp,
+ **kwargs):
+
+ if utils.is_null_or_empty(perf_policy_resp):
+ return (False, False, "Update performance policy failed. Performance policy name is not present.", {}, {})
+
+ try:
+ perf_policy_name = perf_policy_resp.attrs.get("name")
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(perf_policy_resp, **kwargs)
+ if changed_attrs_dict.__len__() > 0:
+ perf_policy_resp = client_obj.performance_policies.update(id=perf_policy_resp.attrs.get("id"), **params)
+ return (True, True, f"Performance policy '{perf_policy_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, perf_policy_resp.attrs)
+ else:
+ return (True, False, f"Performance policy '{perf_policy_name}' already present in given state.", {}, perf_policy_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Performance policy update failed | {ex}", {}, {})
+
+
+def delete_perf_policy(
+ client_obj,
+ perf_policy_name):
+
+ if utils.is_null_or_empty(perf_policy_name):
+ return (False, False, "Delete performance policy failed. Performance policy name is not present.", {})
+
+ try:
+ perf_policy_resp = client_obj.performance_policies.get(id=None, name=perf_policy_name)
+ if utils.is_null_or_empty(perf_policy_resp):
+            return (False, False, f"Cannot delete performance policy '{perf_policy_name}' as it is not present.", {})
+ else:
+ perf_policy_resp = client_obj.performance_policies.delete(id=perf_policy_resp.attrs.get("id"))
+ return (True, True, f"Deleted performance policy '{perf_policy_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Performance policy deletion failed | {ex}", {})
+
+
+def main():
+
+ fields = {
+ "app_category": {
+ "required": False,
+ "type": "str"
+ },
+ "block_size": {
+ "required": False,
+ "type": "int"
+ },
+ "cache": {
+ "required": False,
+ "type": "bool"
+ },
+ "cache_policy": {
+ "required": False,
+ "choices": ['disabled', 'normal', 'aggressive', 'no_write', 'aggressive_read_no_write'],
+ "type": "str"
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "compress": {
+ "required": False,
+ "type": "bool"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "dedupe": {
+ "required": False,
+ "type": "bool"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "space_policy": {
+ "required": False,
+ "choices": ['invalid', 'offline', 'non_writable', 'read_only', 'login_only'],
+ "type": "str"
+ },
+ "state": {
+ "required": True,
+ "choices": ['present',
+ 'absent',
+ 'create'
+ ],
+ "type": "str"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ module = AnsibleModule(argument_spec=fields)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ app_category = module.params["app_category"]
+ block_size = module.params["block_size"]
+ cache = module.params["cache"]
+ cache_policy = module.params["cache_policy"]
+ compress = module.params["compress"]
+ description = module.params["description"]
+ dedupe = module.params["dedupe"]
+ perf_policy_name = module.params["name"]
+ change_name = module.params["change_name"]
+ space_policy = module.params["space_policy"]
+ state = module.params["state"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+            msg="Missing variables: hostname, username and password are mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "create" or state == "present":
+ perf_policy_resp = client_obj.performance_policies.get(id=None, name=perf_policy_name)
+ if utils.is_null_or_empty(perf_policy_resp) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_perf_policy(
+ client_obj,
+ perf_policy_name,
+ app_category=app_category,
+ block_size=block_size,
+ cache=cache,
+ cache_policy=cache_policy,
+ compress=compress,
+ description=description,
+ dedupe_enabled=dedupe,
+ space_policy=space_policy)
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_perf_policy(
+ client_obj,
+ perf_policy_resp,
+ name=change_name,
+ app_category=app_category,
+ cache=cache,
+ cache_policy=cache_policy,
+ compress=compress,
+ description=description,
+ dedupe_enabled=dedupe,
+ space_policy=space_policy)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_perf_policy(
+ client_obj,
+ perf_policy_name)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_pool.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_pool.py
new file mode 100644
index 00000000..a0b73f74
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_pool.py
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the storage pools on an HPE Nimble Storage group.
+module: hpe_nimble_pool
+options:
+ array_list:
+ required: False
+ type: list
+ elements: dict
+ description:
+ - List of arrays in the pool with detailed information. To create or update array list, only array ID is required.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change name of the existing pool.
+ description:
+ required: False
+ type: str
+ description:
+ - Text description of pool.
+ dedupe_all_volumes:
+ type: bool
+ description:
+ - Indicates if dedupe is enabled by default for new volumes on this pool.
+ force:
+ required: False
+ type: bool
+ description:
+ - Forcibly delete the specified pool even if it contains deleted volumes whose space is being reclaimed.
+ Forcibly remove an array from array_list via an update operation even if the array is not reachable.
+      There should be no volumes in the pool for the force update operation to succeed.
+ is_default:
+ required: False
+ type: bool
+ description:
+ - Indicates if this is the default pool.
+ merge:
+ required: False
+ type: bool
+ description:
+ - Merge the specified pool into the target pool. All volumes on the specified pool are moved to the target pool and the
+ specified pool is then deleted. All the arrays in the pool are assigned to the target pool.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the pool.
+ state:
+ required: True
+ choices:
+ - present
+ - absent
+ - create
+ type: str
+ description:
+ - The pool operation.
+ target:
+ required: False
+ type: str
+ description:
+ - Name of the target pool.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage pools
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create a pool if not present. Fails if already present.
+# if state is present, then create a pool if not present. Succeed if it already exists.
+- name: Create pool if not present
+ hpe.nimble.hpe_nimble_pool:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: "{{ state | default('present') }}"
+ name: "{{ name }}"
+    array_list: "{{ array_list }}"
+ description: "{{ description }}"
+
+- name: Delete pool
+ hpe.nimble.hpe_nimble_pool:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: absent
+
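+# A minimal illustrative merge: with state 'present' and merge set, all volumes
+# in the pool named by 'name' move to 'target' and the source pool is deleted.
+- name: Merge pool into target pool
+  hpe.nimble.hpe_nimble_pool:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    target: "{{ target }}"
+    merge: true
+    state: present
+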
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_pool(
+ client_obj,
+ pool_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(pool_name):
+ return (False, False, "Create pool failed as pool name is not present.", {}, {})
+
+ try:
+ pool_resp = client_obj.pools.get(id=None, name=pool_name)
+ if utils.is_null_or_empty(pool_resp):
+ params = utils.remove_null_args(**kwargs)
+ pool_resp = client_obj.pools.create(name=pool_name,
+ **params)
+ if pool_resp is not None:
+ return (True, True, f"Created pool '{pool_name}' successfully.", {}, pool_resp.attrs)
+ else:
+            return (False, False, f"Pool '{pool_name}' cannot be created as it is already present in given state.", {}, {})
+ except Exception as ex:
+ return (False, False, f"Pool creation failed | {ex}", {}, {})
+
+
+def update_pool(
+ client_obj,
+ pool_resp,
+ **kwargs):
+
+ if utils.is_null_or_empty(pool_resp):
+ return (False, False, "Update pool failed as pool name is not present.", {}, {})
+ try:
+ pool_name = pool_resp.attrs.get("name")
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(pool_resp, **kwargs)
+ if changed_attrs_dict.__len__() > 0:
+ pool_resp = client_obj.pools.update(id=pool_resp.attrs.get("id"), **params)
+ return (True, True, f"Pool '{pool_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, pool_resp.attrs)
+ else:
+ return (True, False, f"Pool '{pool_name}' already present in given state.", {}, pool_resp.attrs)
+
+ except Exception as ex:
+ return (False, False, f"Pool update failed | {ex}", {}, {})
+
+
+def delete_pool(
+ client_obj,
+ pool_name):
+
+ if utils.is_null_or_empty(pool_name):
+ return (False, False, "Delete pool failed as pool name is not present.", {})
+
+ try:
+ pool_resp = client_obj.pools.get(id=None, name=pool_name)
+ if utils.is_null_or_empty(pool_resp):
+ return (False, False, f"Cannot delete pool '{pool_name}' as it is not present.", {})
+ else:
+ pool_resp = client_obj.pools.delete(id=pool_resp.attrs.get("id"))
+ return (True, True, f"Deleted pool '{pool_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Pool deletion failed | {ex}", {})
+
+
+def merge_pool(
+ client_obj,
+ pool_name,
+ target,
+ **kwargs):
+
+ if utils.is_null_or_empty(pool_name):
+ return (False, False, "Merge pool failed as pool name is not present.", {}, {})
+ if utils.is_null_or_empty(target):
+        return (False, False, "Merge pool failed as target pool name is not present.", {}, {})
+
+ try:
+ pool_resp = client_obj.pools.get(id=None, name=pool_name)
+ if utils.is_null_or_empty(pool_resp):
+ return (False, False, f"Merge pools failed as source pool '{pool_name}' is not present.", {}, {})
+ target_pool_resp = client_obj.pools.get(id=None, name=target)
+ if utils.is_null_or_empty(target_pool_resp):
+ return (False, False, f"Merge pools failed as target pool '{target}' is not present.", {}, {})
+
+ params = utils.remove_null_args(**kwargs)
+ resp = client_obj.pools.merge(id=pool_resp.attrs.get("id"),
+ target_pool_id=target_pool_resp.attrs.get("id"),
+ **params)
+ if hasattr(resp, 'attrs'):
+ resp = resp.attrs
+        return (True, True, f"Merged pool '{pool_name}' into target pool '{target}' successfully.", {}, resp)
+ except Exception as ex:
+ return (False, False, f"Merge pool failed | {ex}", {}, {})
+
+
+def main():
+
+ fields = {
+ "array_list": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "dedupe_all_volumes": {
+ "required": False,
+ "type": "bool"
+ },
+ "force": {
+ "required": False,
+ "type": "bool"
+ },
+ "is_default": {
+ "required": False,
+ "type": "bool"
+ },
+ "merge": {
+ "required": False,
+ "type": "bool"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "state": {
+ "required": True,
+ "choices": ['present',
+ 'absent',
+ 'create'
+ ],
+ "type": "str"
+ },
+ "target": {
+ "required": False,
+ "type": "str"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ required_if = [('state', 'create', ['array_list'])]
+
+ module = AnsibleModule(argument_spec=fields, required_if=required_if)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ pool_name = module.params["name"]
+ change_name = module.params["change_name"]
+ description = module.params["description"]
+ array_list = module.params["array_list"]
+ force = module.params["force"]
+ dedupe_all_volumes = module.params["dedupe_all_volumes"]
+ is_default = module.params["is_default"]
+ target = module.params["target"]
+ merge = module.params["merge"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+            msg="Missing variables: hostname, username and password are mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == 'present' and merge is True:
+ return_status, changed, msg, changed_attrs_dict, resp = merge_pool(
+ client_obj,
+ pool_name,
+ target,
+ force=force)
+
+ elif (merge is None or merge is False) and (state == "create" or state == "present"):
+ pool_resp = client_obj.pools.get(id=None, name=pool_name)
+
+ if utils.is_null_or_empty(pool_resp) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_pool(
+ client_obj,
+ pool_name,
+ description=description,
+ array_list=array_list,
+ dedupe_all_volumes=dedupe_all_volumes)
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_pool(
+ client_obj,
+ pool_resp,
+ name=change_name,
+ description=description,
+ array_list=array_list,
+ force=force,
+ dedupe_all_volumes=dedupe_all_volumes,
+ is_default=is_default)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_pool(
+ client_obj,
+ pool_name)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_protection_schedule.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_protection_schedule.py
new file mode 100644
index 00000000..fdfb1ed1
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_protection_schedule.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Alok Ranjan (@ranjanal)
+description: Manage the protection schedules on an HPE Nimble Storage group.
+module: hpe_nimble_protection_schedule
+options:
+ at_time:
+ required: False
+ type: int
+ default: 0
+ description:
+ - Time of day when snapshot should be taken. In case repeat frequency specifies more than one snapshot
+ in a day then the until_time option specifies until what time of day to take snapshots.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change the name of existing protection schedule.
+ days:
+ required: False
+ type: str
+ description:
+ - Specifies which days snapshots should be taken. Comma separated list of days of the week or 'all'.
+ description:
+ required: False
+ type: str
+ description:
+ - Description of the schedule.
+ disable_appsync:
+ required: False
+ type: bool
+ description:
+ - Disables application synchronized snapshots and creates crash consistent snapshots instead.
+ downstream_partner:
+ required: False
+ type: str
+ description:
+ - Specifies the partner name if snapshots created by this schedule should be replicated.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the protection schedule to create.
+ num_retain:
+ required: False
+ type: int
+ description:
+ - Number of snapshots to retain. If replication is enabled on this schedule the array will always retain the latest
+ replicated snapshot, which may exceed the specified retention value. This is necessary to ensure efficient replication performance.
+ num_retain_replica:
+ required: False
+ type: int
+ default: 0
+ description:
+ - Number of snapshots to retain on the replica.
+ period:
+ required: False
+ type: int
+ description:
+ - Repeat interval for snapshots with respect to the period_unit. For example,
+ a value of 2 with the 'period_unit' of 'hours' results in one snapshot every 2 hours.
+ period_unit:
+ choices:
+ - minutes
+ - hours
+ - days
+ - weeks
+ required: False
+ type: str
+ description:
+ - Time unit over which to take the number of snapshots specified in 'period'. For example, a value of 'days' with a
+ 'period' of '1' results in one snapshot every day.
+ prot_template_name:
+ required: False
+ type: str
+ description:
+ - Name of the protection template in which this protection schedule is attached to.
+ repl_alert_thres:
+ required: False
+ type: int
+ description:
+ - Replication alert threshold in seconds. If the replication of a snapshot takes more than this amount of time to complete
+ an alert will be generated. Enter 0 to disable this alert.
+ replicate_every:
+ required: False
+ type: int
+ description:
+ - Specifies which snapshots should be replicated. If snapshots are replicated and this option is not specified, every snapshot is replicated.
+ schedule_type:
+ choices:
+ - regular
+ - external_trigger
+ required: False
+ type: str
+ description:
+ - Normal schedules have internal timers which drive snapshot creation. An externally driven schedule has no internal timers.
+ All snapshot activity is driven by an external trigger. In other words, these schedules are used only for externally driven manual snapshots.
+ skip_db_consistency_check:
+ required: False
+ type: bool
+ description:
+ - Skip consistency check for database files on snapshots created by this schedule. This option only applies to snapshot schedules of a protection
+ template with application synchronization set to VSS, application ID set to MS Exchange 2010 or later w/DAG, this schedule's snap_verify option
+ set to yes, and its disable_appsync option set to false. Skipping consistency checks is only recommended if each database in a DAG has multiple copies.
+ snap_verify:
+ required: False
+ type: bool
+ description:
+ - Run verification tool on snapshot created by this schedule. This option can only be used with snapshot schedules of a protection template
+ that has application synchronization. The tool used to verify snapshot depends on the type of application. For example, if application
+ synchronization is VSS and the application ID is Exchange, eseutil tool is run on the snapshots. If verification fails, the logs are not truncated.
+ state:
+ required: True
+ choices:
+ - present
+ - absent
+ - create
+ type: str
+ description:
+    - The protection schedule operations.
+ until_time:
+ required: False
+ type: int
+ description:
+ - Time of day to stop taking snapshots. Applicable only when repeat frequency specifies more than one snapshot in a day.
+ use_downstream_for_DR:
+ required: False
+ type: bool
+ description:
+ - Break synchronous replication for the specified volume collection and present downstream volumes to host(s). Downstream volumes in the volume
+ collection will be set to online and presented to the host(s) using new serial and LUN numbers. No changes will be made to the upstream volumes,
+ their serial and LUN numbers, and their online state. The existing ACLs on the upstream volumes will be copied to the downstream volumes.
+ Use this in conjunction with an empty downstream_partner_id. This unconfigures synchronous replication when the partner is removed from the
+ last replicating schedule in the specified volume collection and presents the downstream volumes to host(s). Host(s) will need to be configured
+ to access the new volumes with the newly assigned serial and LUN numbers. Use this option to expose downstream volumes in a synchronously replicated
+ volume collection to host(s) only when the upstream partner is confirmed to be down and there is no communication between partners. Do not execute this
+ operation if a previous Group Management Service takeover has been performed on a different array. Do not perform a subsequent Group Management Service
+ takeover on a different array as it will lead to irreconcilable conflicts. This limitation is cleared once the Group management service backup array has
+ successfully synchronized after reconnection.
+ volcoll_or_prottmpl_type:
+ choices:
+ - protection_template
+ - volume_collection
+ required: True
+ type: str
+ description:
+ - Type of the protection policy this schedule is attached to.
+ volcoll_name:
+ required: False
+ type: str
+ description:
+ - Name of the volume collection in which this protection schedule is attached to.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage protection schedules
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create a protection schedule if not present. Fails if already present.
+# if state is present, then create a protection schedule if not present. Succeed if it already exists.
+- name: Create protection schedule if not present
+ hpe.nimble.hpe_nimble_protection_schedule:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ description: "{{ description | default(None)}}"
+ state: "{{ state | default('present') }}"
+ volcoll_or_prottmpl_type: "{{ volcoll_or_prottmpl_type }}"
+ prot_template_name: "{{ prot_template_name }}"
+ num_retain: "{{ num_retain }}"
+
+- name: Delete protection schedule
+ hpe.nimble.hpe_nimble_protection_schedule:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ volcoll_or_prottmpl_type: "{{ volcoll_or_prottmpl_type }}"
+ volcoll_name: "{{ volcoll_name }}"
+ state: absent
+
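+# A minimal illustrative schedule: one snapshot every hour retaining 48 copies,
+# attached to a volume collection; adjust period/num_retain to your needs.
+- name: Create hourly protection schedule
+  hpe.nimble.hpe_nimble_protection_schedule:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    volcoll_or_prottmpl_type: volume_collection
+    volcoll_name: "{{ volcoll_name }}"
+    period: 1
+    period_unit: hours
+    num_retain: 48
+    state: present
+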
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_prot_schedule(
+ client_obj,
+ prot_schedule_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(prot_schedule_name):
+ return (False, False, "Create protection schedule failed as protection schedule name is not present.", {}, {})
+ try:
+ prot_schedule_resp = client_obj.protection_schedules.get(id=None,
+ name=prot_schedule_name,
+ volcoll_or_prottmpl_type=kwargs['volcoll_or_prottmpl_type'],
+ volcoll_or_prottmpl_id=kwargs['volcoll_or_prottmpl_id'])
+ if utils.is_null_or_empty(prot_schedule_resp):
+ params = utils.remove_null_args(**kwargs)
+ prot_schedule_resp = client_obj.protection_schedules.create(name=prot_schedule_name, **params)
+ return (True, True, f"Created protection schedule '{prot_schedule_name}' successfully.", {}, prot_schedule_resp.attrs)
+ else:
+ return (False, False, f"Cannot create protection schedule '{prot_schedule_name}' as it is already present in given state.",
+ {}, prot_schedule_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Protection schedule creation failed | {ex}", {}, {})
+
+
+def update_prot_schedule(
+ client_obj,
+ prot_schedule_resp,
+ **kwargs):
+
+ if utils.is_null_or_empty(prot_schedule_resp):
+ return (False, False, "Update protection schedule failed as protection schedule is not present.", {}, {})
+ try:
+ prot_schedule_name = prot_schedule_resp.attrs.get("name")
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(prot_schedule_resp, **kwargs)
+ if changed_attrs_dict.__len__() > 0:
+ prot_schedule_resp = client_obj.protection_schedules.update(id=prot_schedule_resp.attrs.get("id"), **params)
+ return (True, True, f"Protection schedule '{prot_schedule_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, prot_schedule_resp.attrs)
+ else:
+ return (True, False, f"Protection schedule '{prot_schedule_name}' already present in given state.", {}, prot_schedule_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Protection schedule update failed |{ex}", {}, {})
+
+
+def delete_prot_schedule(client_obj,
+ prot_schedule_name,
+ volcoll_or_prottmpl_type,
+ volcoll_or_prottmpl_id):
+
+ if utils.is_null_or_empty(prot_schedule_name):
+ return (False, False, "Protection schedule deletion failed as protection schedule name is not present", {})
+
+ try:
+ prot_schedule_resp = client_obj.protection_schedules.get(id=None,
+ name=prot_schedule_name,
+ volcoll_or_prottmpl_type=volcoll_or_prottmpl_type,
+ volcoll_or_prottmpl_id=volcoll_or_prottmpl_id)
+ if utils.is_null_or_empty(prot_schedule_resp):
+ return (False, False, f"Protection schedule '{prot_schedule_name}' not present to delete.", {})
+ else:
+ client_obj.protection_schedules.delete(id=prot_schedule_resp.attrs.get("id"))
+ return (True, True, f"Deleted protection schedule '{prot_schedule_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Protection schedule deletion failed | {ex}", {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['present',
+ 'absent',
+ 'create'
+ ],
+ "type": "str"
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "volcoll_or_prottmpl_type": {
+ "choices": ['protection_template', 'volume_collection'],
+ "required": True,
+ "type": "str"
+ },
+ "volcoll_name": {
+ "required": False,
+ "type": "str"
+ },
+ "prot_template_name": {
+ "required": False,
+ "type": "str"
+ },
+ "period": {
+ "required": False,
+ "type": "int"
+ },
+ "period_unit": {
+ "choices": ['minutes', 'hours', 'days', 'weeks'],
+ "required": False,
+ "type": "str"
+ },
+ "at_time": {
+ "required": False,
+ "type": "int"
+ },
+ "until_time": {
+ "required": False,
+ "type": "int"
+ },
+ "days": {
+ "required": False,
+ "type": "str"
+ },
+ "num_retain": {
+ "required": False,
+ "type": "int"
+ },
+ "downstream_partner": {
+ "required": False,
+ "type": "str"
+ },
+ "replicate_every": {
+ "required": False,
+ "type": "int"
+ },
+ "num_retain_replica": {
+ "required": False,
+ "type": "int"
+ },
+ "repl_alert_thres": {
+ "required": False,
+ "type": "int"
+ },
+ "snap_verify": {
+ "required": False,
+ "type": "bool"
+ },
+ "skip_db_consistency_check": {
+ "required": False,
+ "type": "bool"
+ },
+ "disable_appsync": {
+ "required": False,
+ "type": "bool"
+ },
+ "schedule_type": {
+ "choices": ['regular', 'external_trigger'],
+ "required": False,
+ "type": "str"
+ },
+ "use_downstream_for_DR": {
+ "required": False,
+ "type": "bool"
+ }
+ }
+
+ mutually_exclusive = [
+ ['prot_template_name', 'volcoll_name']
+ ]
+ required_if = [
+ ['state', 'create', ['num_retain']]
+ ]
+ required_one_of = [
+ ['volcoll_name', 'prot_template_name']
+ ]
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ module = AnsibleModule(argument_spec=fields, mutually_exclusive=mutually_exclusive, required_if=required_if, required_one_of=required_one_of)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ prot_schedule_name = module.params["name"]
+ change_name = module.params["change_name"]
+ description = module.params["description"]
+ volcoll_or_prottmpl_type = module.params["volcoll_or_prottmpl_type"]
+ volcoll_name = module.params["volcoll_name"]
+ prot_template_name = module.params["prot_template_name"]
+ period = module.params["period"]
+ period_unit = module.params["period_unit"]
+ at_time = module.params["at_time"]
+ until_time = module.params["until_time"]
+ days = module.params["days"]
+ num_retain = module.params["num_retain"]
+ downstream_partner = module.params["downstream_partner"]
+ replicate_every = module.params["replicate_every"]
+ num_retain_replica = module.params["num_retain_replica"]
+ repl_alert_thres = module.params["repl_alert_thres"]
+ snap_verify = module.params["snap_verify"]
+ skip_db_consistency_check = module.params["skip_db_consistency_check"]
+ disable_appsync = module.params["disable_appsync"]
+ schedule_type = module.params["schedule_type"]
+ use_downstream_for_DR = module.params["use_downstream_for_DR"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+            msg="Missing variables: hostname, username and password are mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "create" or state == "present":
+        # We need to enforce the below params as mandatory because a protection schedule with the same name
+        # can exist in a different volume collection or in a different protection template. Hence, to modify/update
+        # a protection schedule, the user must provide all three params so we can query and find the exact protection schedule.
+        if (volcoll_name is None and prot_template_name is None) or volcoll_or_prottmpl_type is None:
+            module.fail_json(msg='Please provide the mandatory params: volcoll_or_prottmpl_type, and volcoll_name or prot_template_name.')
+
+ prot_schedule_resp = client_obj.protection_schedules.get(
+ id=None,
+ name=prot_schedule_name,
+ volcoll_or_prottmpl_type=volcoll_or_prottmpl_type,
+ volcoll_or_prottmpl_id=utils.get_volcoll_or_prottmpl_id(client_obj, volcoll_name, prot_template_name))
+
+ if utils.is_null_or_empty(prot_schedule_resp) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_prot_schedule(
+ client_obj,
+ prot_schedule_name,
+ description=description,
+ volcoll_or_prottmpl_type=volcoll_or_prottmpl_type,
+ volcoll_or_prottmpl_id=utils.get_volcoll_or_prottmpl_id(client_obj, volcoll_name, prot_template_name),
+ period=period,
+ period_unit=period_unit,
+ at_time=at_time,
+ until_time=until_time,
+ days=days,
+ num_retain=num_retain,
+ downstream_partner=downstream_partner,
+ downstream_partner_id=utils.get_downstream_partner_id(client_obj, downstream_partner),
+ replicate_every=replicate_every,
+ num_retain_replica=num_retain_replica,
+ repl_alert_thres=repl_alert_thres,
+ snap_verify=snap_verify,
+ skip_db_consistency_check=skip_db_consistency_check,
+ disable_appsync=disable_appsync,
+ schedule_type=schedule_type)
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_prot_schedule(
+ client_obj,
+ prot_schedule_resp,
+ name=change_name,
+ description=description,
+ period=period,
+ period_unit=period_unit,
+ at_time=at_time,
+ until_time=until_time,
+ days=days,
+ num_retain=num_retain,
+ downstream_partner=downstream_partner,
+ downstream_partner_id=utils.get_downstream_partner_id(client_obj, downstream_partner),
+ replicate_every=replicate_every,
+ num_retain_replica=num_retain_replica,
+ repl_alert_thres=repl_alert_thres,
+ snap_verify=snap_verify,
+ skip_db_consistency_check=skip_db_consistency_check,
+ disable_appsync=disable_appsync,
+ schedule_type=schedule_type,
+ use_downstream_for_DR=use_downstream_for_DR)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_prot_schedule(
+ client_obj,
+ prot_schedule_name,
+ volcoll_or_prottmpl_type,
+ utils.get_volcoll_or_prottmpl_id(client_obj, volcoll_name, prot_template_name))
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_protection_template.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_protection_template.py
new file mode 100644
index 00000000..7491d844
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_protection_template.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the protection templates on an HPE Nimble Storage group.
+module: hpe_nimble_protection_template
+options:
+ agent_hostname:
+ required: False
+ type: str
+ description:
+ - Generic backup agent hostname.
+ agent_password:
+ required: False
+ type: str
+ description:
+ - Generic backup agent password.
+ agent_username:
+ required: False
+ type: str
+ description:
+ - Generic backup agent username.
+ app_cluster:
+ required: False
+ type: str
+ description:
+    - If the application is running within a Windows cluster environment, this is the cluster name.
+ app_id:
+ required: False
+ choices:
+ - inval
+ - exchange
+ - exchange_dag
+ - hyperv
+ - sql2005
+ - sql2008
+ - sql2012
+ - sql2014
+ - sql2016
+ - sql2017
+ type: str
+ description:
+ - Application ID running on the server.
+ app_server:
+ required: False
+ type: str
+ description:
+ - Application server hostname.
+ app_service_name:
+ required: False
+ type: str
+ description:
+    - If the application is running within a Windows cluster environment, this is the instance name of the service running within the cluster environment.
+ app_sync:
+ choices:
+ - none
+ - vss
+ - vmware
+ - generic
+ required: False
+ type: str
+ description:
+ - Application synchronization.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change name of the existing protection template.
+ description:
+ required: False
+ type: str
+ description:
+ - Text description of protection template.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the protection template.
+ state:
+ required: True
+ choices:
+ - present
+ - absent
+ - create
+ type: str
+ description:
+ - The protection template operations.
+ vcenter_hostname:
+ required: False
+ type: str
+ description:
+ - VMware vCenter hostname.
+ vcenter_password:
+ required: False
+ type: str
+ description:
+ - Application VMware vCenter password. A password with few constraints.
+ vcenter_username:
+ required: False
+ type: str
+ description:
+ - Application VMware vCenter username. String of up to 80 alphanumeric characters, beginning with a letter.
+ It can include ampersand (@), backslash (\), dash (-), period (.), and underscore (_).
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage protection templates
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create a protection template if not present. Fails if already present.
+# if state is present, then create a protection template if not present. Succeed if it already exists.
+- name: Create protection template if not present
+ hpe.nimble.hpe_nimble_protection_template:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ description: "{{ description | default(None)}}"
+ state: "{{ state | default('present') }}"
+
+- name: Delete protection template
+ hpe.nimble.hpe_nimble_protection_template:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: absent
+
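+# A minimal illustrative template with VMware application synchronization;
+# the vCenter variables are assumed to be defined elsewhere in the play.
+- name: Create protection template with VMware app sync
+  hpe.nimble.hpe_nimble_protection_template:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    app_sync: vmware
+    vcenter_hostname: "{{ vcenter_hostname }}"
+    vcenter_username: "{{ vcenter_username }}"
+    vcenter_password: "{{ vcenter_password }}"
+    state: present
+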
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_prot_template(
+ client_obj,
+ prot_template_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(prot_template_name):
+ return (False, False, "Create protection template failed as protection template name is not present.", {}, {})
+ try:
+ prot_template_resp = client_obj.protection_templates.get(id=None, name=prot_template_name)
+ if utils.is_null_or_empty(prot_template_resp):
+ params = utils.remove_null_args(**kwargs)
+ prot_template_resp = client_obj.protection_templates.create(name=prot_template_name, **params)
+ return (True, True, f"Protection template '{prot_template_name}' created successfully.", {}, prot_template_resp.attrs)
+ else:
+ return (False, False, f"Protection template '{prot_template_name}' cannot be created as it is already present in given state.",
+ {}, prot_template_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Protection template creation failed | {ex}", {}, {})
+
+
+def update_prot_template(
+ client_obj,
+ prot_template_resp,
+ **kwargs):
+
+ if utils.is_null_or_empty(prot_template_resp):
+ return (False, False, "Update protection template failed as protection template is not present.", {}, {})
+ try:
+ prot_template_name = prot_template_resp.attrs.get("name")
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(prot_template_resp, **kwargs)
+ if changed_attrs_dict.__len__() > 0:
+ prot_template_resp = client_obj.protection_templates.update(id=prot_template_resp.attrs.get("id"), **params)
+ return (True, True, f"Protection template '{prot_template_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, prot_template_resp.attrs)
+ else:
+ return (True, False, f"Protection template '{prot_template_name}' already present in given state.", {}, prot_template_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Protection template update failed | {ex}", {}, {})
+
+
+def delete_prot_template(client_obj, prot_template_name):
+
+ if utils.is_null_or_empty(prot_template_name):
+ return (False, False, "Protection template deletion failed as protection template name is not present.", {})
+
+ try:
+ prot_template_resp = client_obj.protection_templates.get(id=None, name=prot_template_name)
+ if utils.is_null_or_empty(prot_template_resp):
+ return (False, False, f"Protection template '{prot_template_name}' not present to delete.", {})
+ else:
+ client_obj.protection_templates.delete(id=prot_template_resp.attrs.get("id"))
+ return (True, True, f"Deleted protection template '{prot_template_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Protection template deletion failed | {ex}", {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['present',
+ 'absent',
+ 'create'
+ ],
+ "type": "str"
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "app_sync": {
+ "choices": ['none', 'vss', 'vmware', 'generic'],
+ "required": False,
+ "type": "str"
+ },
+ "app_server": {
+ "required": False,
+ "type": "str"
+ },
+ "app_id": {
+ "required": False,
+ "choices": ['inval', 'exchange', 'exchange_dag', 'hyperv', 'sql2005', 'sql2008', 'sql2012', 'sql2014', 'sql2016', 'sql2017'],
+ "type": "str"
+ },
+ "app_cluster": {
+ "required": False,
+ "type": "str"
+ },
+ "app_service_name": {
+ "required": False,
+ "type": "str"
+ },
+ "vcenter_hostname": {
+ "required": False,
+ "type": "str"
+ },
+ "vcenter_username": {
+ "required": False,
+ "type": "str"
+ },
+ "vcenter_password": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "agent_hostname": {
+ "required": False,
+ "type": "str"
+ },
+ "agent_username": {
+ "required": False,
+ "type": "str"
+ },
+ "agent_password": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ module = AnsibleModule(argument_spec=fields)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ prot_template_name = module.params["name"]
+ change_name = module.params["change_name"]
+ description = module.params["description"]
+ app_sync = module.params["app_sync"]
+ app_server = module.params["app_server"]
+ app_id = module.params["app_id"]
+ app_cluster = module.params["app_cluster"]
+ app_service_name = module.params["app_service_name"]
+ vcenter_hostname = module.params["vcenter_hostname"]
+ vcenter_username = module.params["vcenter_username"]
+ vcenter_password = module.params["vcenter_password"]
+ agent_hostname = module.params["agent_hostname"]
+ agent_username = module.params["agent_username"]
+ agent_password = module.params["agent_password"]
+
+ if (username is None or password is None or hostname is None or prot_template_name is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username, password and protection template is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "create" or state == "present":
+ prot_template_resp = client_obj.protection_templates.get(id=None, name=prot_template_name)
+ if utils.is_null_or_empty(prot_template_resp) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_prot_template(
+ client_obj,
+ prot_template_name,
+ description=description,
+ app_sync=app_sync,
+ app_server=app_server,
+ app_id=app_id,
+ app_cluster_name=app_cluster,
+ app_service_name=app_service_name,
+ vcenter_hostname=vcenter_hostname,
+ vcenter_username=vcenter_username,
+ vcenter_password=vcenter_password,
+ agent_hostname=agent_hostname,
+ agent_username=agent_username,
+ agent_password=agent_password)
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_prot_template(
+ client_obj,
+ prot_template_resp,
+ name=change_name,
+ description=description,
+ app_sync=app_sync,
+ app_server=app_server,
+ app_id=app_id, app_cluster_name=app_cluster,
+ app_service_name=app_service_name,
+ vcenter_hostname=vcenter_hostname,
+ vcenter_username=vcenter_username,
+ vcenter_password=vcenter_password,
+ agent_hostname=agent_hostname,
+ agent_username=agent_username,
+ agent_password=agent_password)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_prot_template(client_obj, prot_template_name)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_shelf.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_shelf.py
new file mode 100644
index 00000000..88e15e05
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_shelf.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the shelves on an HPE Nimble Storage group.
+module: hpe_nimble_shelf
+options:
+ accept_dedupe_impact:
+ required: False
+ type: bool
+ description:
+ - Accept the reduction or elimination of deduplication capability on the system as a result of activating a shelf
+ that does not meet the necessary deduplication requirements.
+ accept_foreign:
+ required: False
+ type: bool
+ description:
+ - Accept the removal of data on the shelf disks and activate foreign shelf.
+ activated:
+ required: True
+ type: bool
+ description:
+    - Activated state for a shelf or disk set means it is available to store data on. An activated shelf may not be deactivated.
+ driveset:
+ required: False
+ type: int
+ description:
+ - Driveset to activate.
+ force:
+ required: False
+ type: bool
+ description:
+ - Forcibly activate shelf.
+ last_request:
+ required: False
+ type: bool
+ description:
+ - Indicates this is the last request in a series of shelf add requests.
+ state:
+ required: True
+ choices:
+ - present
+ type: str
+ description:
+ - The shelf operation.
+ shelf_serial:
+ required: True
+ type: str
+ description:
+ - Serial number of shelf.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage shelves
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+- name: Update shelf
+ hpe.nimble.hpe_nimble_shelf:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ shelf_serial: "{{ shelf_serial | mandatory }}"
+ accept_foreign: "{{ accept_foreign }}"
+ force: "{{ force }}"
+ activated: "{{ activated }}"
+ state: present
+
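+# Added illustrative sketch (not part of the original examples): activating a
+# specific driveset; the driveset number and last_request flag are assumptions.
+- name: Activate a specific driveset on a shelf
+  hpe.nimble.hpe_nimble_shelf:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    shelf_serial: "{{ shelf_serial | mandatory }}"
+    activated: true
+    driveset: 1
+    last_request: true
+    state: present
+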
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def update_shelf(
+ client_obj,
+ shelf_serial,
+ **kwargs):
+
+ if utils.is_null_or_empty(shelf_serial):
+ return (False, False, "Shelf update failed as no shelf id provided.", {})
+
+ try:
+ shelf_list_resp = client_obj.shelves.list(detail=True)
+ if utils.is_null_or_empty(shelf_list_resp):
+ return (False, False, f"Shelf serial '{shelf_serial}' is not present on array.", {})
+ else:
+ shelf_resp = None
+ # check if the given shelf serial is present on array
+ for resp in shelf_list_resp:
+ if shelf_serial == resp.attrs.get("serial"):
+ shelf_resp = resp
+ break
+ if utils.is_null_or_empty(shelf_resp):
+ return (False, False, f"Shelf serial '{shelf_serial}' is not present on array.", {})
+ else:
+                changed_attrs_dict, params = utils.remove_unchanged_or_null_args(shelf_resp, **kwargs)
+                if len(changed_attrs_dict) > 0:
+                    shelf_resp = client_obj.shelves.update(id=shelf_resp.attrs.get("id"), **params)
+                    return (True, True, f"Successfully updated shelf '{shelf_serial}'.", shelf_resp.attrs)
+                else:
+                    return (True, False, f"Shelf serial '{shelf_serial}' already updated.", shelf_resp.attrs)
+    except Exception as ex:
+        return (False, False, f"Shelf update failed | {ex}", {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['present'],
+ "type": "str"
+ },
+ "shelf_serial": {
+ "required": True,
+ "type": "str"
+ },
+ "activated": {
+ "required": True,
+ "type": "bool"
+ },
+ "driveset": {
+ "required": False,
+ "type": "int"
+ },
+ "force": {
+ "required": False,
+ "type": "bool"
+ },
+ "accept_foreign": {
+ "required": False,
+ "type": "bool"
+ },
+ "accept_dedupe_impact": {
+ "required": False,
+ "type": "bool"
+ },
+ "last_request": {
+ "required": False,
+ "type": "bool"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ module = AnsibleModule(argument_spec=fields)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ shelf_serial = module.params["shelf_serial"]
+ activated = module.params["activated"]
+ driveset = module.params["driveset"]
+ force = module.params["force"]
+ accept_foreign = module.params["accept_foreign"]
+ accept_dedupe_impact = module.params["accept_dedupe_impact"]
+ last_request = module.params["last_request"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "present":
+            return_status, changed, msg, resp = update_shelf(
+ client_obj,
+ shelf_serial,
+ activated=activated,
+ driveset=driveset,
+ force=force,
+ accept_foreign=accept_foreign,
+ accept_dedupe_impact=accept_dedupe_impact,
+ last_request=last_request)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_snapshot.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_snapshot.py
new file mode 100644
index 00000000..aaf89155
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_snapshot.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the snapshots on an HPE Nimble Storage group.
+module: hpe_nimble_snapshot
+options:
+ agent_type:
+ required: False
+ choices:
+ - none
+ - smis
+ - vvol
+ - openstack
+ - openstackv2
+ type: str
+ description:
+ - External management agent type.
+ app_uuid:
+ required: False
+ type: str
+ description:
+ - Application identifier of snapshot.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change name of the existing snapshot.
+ description:
+ required: False
+ type: str
+ description:
+ - Text description of snapshot.
+ expiry_after:
+ required: False
+ type: int
+ description:
+ - Number of seconds after which this snapshot is considered expired by snapshot TTL. A value of 0 indicates that snapshot never expires.
+ force:
+ required: False
+ type: bool
+ description:
+ - Forcibly delete the specified snapshot even if it is the last replicated collection. Doing so could lead to full re-seeding at the next replication.
+ metadata:
+ required: False
+ type: dict
+ description:
+ - Key-value pairs that augment a snapshot's attributes. List of key-value pairs. Keys must be unique and non-empty.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the snapshot.
+ online:
+ required: False
+ type: bool
+ description:
+ - Online state for a snapshot means it could be mounted for data restore.
+ state:
+ required: True
+ choices:
+ - present
+ - absent
+ - create
+ type: str
+ description:
+ - The snapshot state.
+ volume:
+ required: True
+ type: str
+ description:
+ - Parent volume name.
+ writable:
+ required: False
+ type: bool
+ description:
+ - Allow snapshot to be writable. Mandatory and must be set to 'true' for VSS application synchronized snapshots.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage snapshots
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create a snapshot if not present. Fails if already present.
+# if state is present, then create a snapshot if not present. Succeeds if it already exists.
+- name: Create snapshot if not present
+ hpe.nimble.hpe_nimble_snapshot:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: "{{ state | default('present') }}"
+ volume: "{{ volume }}"
+ name: "{{ name }}"
+ online: "{{ online | default(true) }}"
+ writable: "{{ writable | default(false) }}"
+
+- name: Delete snapshot (must be offline)
+ hpe.nimble.hpe_nimble_snapshot:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ volume: "{{ volume }}"
+ name: "{{ name }}"
+ state: absent
+
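+# Added illustrative sketch (not part of the original examples): renaming an
+# existing snapshot through change_name, which the update path maps onto the
+# snapshot's name attribute.
+- name: Rename snapshot
+  hpe.nimble.hpe_nimble_snapshot:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    volume: "{{ volume }}"
+    name: "{{ name }}"
+    change_name: "{{ change_name }}"
+    state: present
+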
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_snapshot(
+ client_obj,
+ vol_name,
+ snapshot_name,
+ **kwargs):
+
+    if utils.is_null_or_empty(snapshot_name):
+        return (False, False, "Create snapshot failed as snapshot name is not present.", {}, {})
+    if utils.is_null_or_empty(vol_name):
+        return (False, False, "Create snapshot failed as volume name is not present.", {}, {})
+
+ try:
+ vol_resp = client_obj.volumes.get(id=None, name=vol_name)
+ if utils.is_null_or_empty(vol_resp):
+ return (False, False, f"Volume '{vol_name}' not present on array for taking snapshot.", {}, {})
+ snap_resp = client_obj.snapshots.get(id=None, vol_name=vol_name, name=snapshot_name)
+ if utils.is_null_or_empty(snap_resp):
+ params = utils.remove_null_args(**kwargs)
+ snap_resp = client_obj.snapshots.create(name=snapshot_name,
+ vol_id=vol_resp.attrs.get("id"),
+ **params)
+            if snap_resp is not None:
+                return (True, True, f"Snapshot '{snapshot_name}' created successfully.", {}, snap_resp.attrs)
+            return (False, False, f"Snapshot '{snapshot_name}' creation returned no response.", {}, {})
+        else:
+            return (False, False, f"Snapshot '{snapshot_name}' cannot be created as it is already present in given state.", {}, {})
+ except Exception as ex:
+ return (False, False, f"Snapshot creation failed | {ex}", {}, {})
+
+
+def update_snapshot(
+ client_obj,
+ snap_resp,
+ **kwargs):
+
+ if utils.is_null_or_empty(snap_resp):
+ return (False, False, "Update snapshot failed as snapshot is not present.", {}, {})
+
+ try:
+ snapshot_name = snap_resp.attrs.get("name")
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(snap_resp, **kwargs)
+        if len(changed_attrs_dict) > 0:
+ snap_resp = client_obj.snapshots.update(id=snap_resp.attrs.get("id"), **params)
+ return (True, True, f"Snapshot '{snapshot_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, snap_resp.attrs)
+ else:
+ return (True, False, f"Snapshot '{snapshot_name}' already present in given state.", {}, snap_resp.attrs)
+
+ except Exception as ex:
+ return (False, False, f"Snapshot update failed | {ex}", {}, {})
+
+
+def delete_snapshot(
+ client_obj,
+ vol_name,
+ snapshot_name):
+
+    if utils.is_null_or_empty(snapshot_name):
+        return (False, False, "Delete snapshot failed as snapshot name is not present.", {})
+    if utils.is_null_or_empty(vol_name):
+        return (False, False, "Delete snapshot failed as volume name is not present.", {})
+
+ try:
+ vol_resp = client_obj.volumes.get(id=None, name=vol_name)
+ if utils.is_null_or_empty(vol_resp):
+ return (False, False, f"Volume '{vol_name}' is not present on Array for deleting snapshot.", {})
+ snap_resp = client_obj.snapshots.get(id=None, vol_name=vol_name, name=snapshot_name)
+ if utils.is_null_or_empty(snap_resp):
+ return (False, False, f"Snapshot '{snapshot_name}' cannot be deleted as it is not present in given volume '{vol_name}'.", {})
+ else:
+ client_obj.snapshots.delete(id=snap_resp.attrs.get("id"))
+ return (True, True, f"Deleted snapshot '{snapshot_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Snapshot deletion failed | {ex}", {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['present',
+ 'absent',
+ 'create'
+ ],
+ "type": "str"
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "volume": {
+ "required": True,
+ "type": "str"
+ },
+ "online": {
+ "required": False,
+ "type": "bool"
+ },
+ "writable": {
+ "required": False,
+ "type": "bool"
+ },
+ "app_uuid": {
+ "required": False,
+ "type": "str"
+ },
+ "metadata": {
+ "required": False,
+ "type": "dict"
+ },
+ "agent_type": {
+ "required": False,
+ "choices": ['none', 'smis', 'vvol', 'openstack', 'openstackv2'],
+ "type": "str"
+ },
+ "expiry_after": {
+ "required": False,
+ "type": "int"
+ },
+ "force": {
+ "required": False,
+ "type": "bool"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ required_if = [('state', 'create', ['volume'])]
+
+ module = AnsibleModule(argument_spec=fields, required_if=required_if)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ snapshot_name = module.params["name"]
+ change_name = module.params["change_name"]
+ description = module.params["description"]
+ vol_name = module.params["volume"]
+ online = module.params["online"]
+ writable = module.params["writable"]
+ app_uuid = module.params["app_uuid"]
+ metadata = module.params["metadata"]
+ agent_type = module.params["agent_type"]
+ expiry_after = module.params["expiry_after"]
+ force = module.params["force"]
+
+ if (username is None or password is None or hostname is None or snapshot_name is None):
+ module.fail_json(
+ msg="Storage system IP or username or password is null or snapshot name is null.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "create" or state == "present":
+ snap_resp = client_obj.snapshots.get(id=None, vol_name=vol_name, name=snapshot_name)
+ if utils.is_null_or_empty(snap_resp) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_snapshot(
+ client_obj,
+ vol_name,
+ snapshot_name,
+ description=description,
+ online=online,
+ writable=writable,
+ app_uuid=app_uuid,
+ metadata=metadata,
+ agent_type=agent_type)
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_snapshot(
+ client_obj,
+ snap_resp,
+ name=change_name,
+ description=description,
+ online=online,
+ expiry_after=expiry_after,
+ app_uuid=app_uuid,
+ metadata=metadata,
+ force=force)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_snapshot(
+ client_obj,
+ vol_name,
+ snapshot_name)
+
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_snapshot_collection.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_snapshot_collection.py
new file mode 100644
index 00000000..ed8e743e
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_snapshot_collection.py
@@ -0,0 +1,403 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the snapshot collections on an HPE Nimble Storage group.
+module: hpe_nimble_snapshot_collection
+options:
+ agent_type:
+ required: False
+ type: str
+ description:
+ - External management agent type for snapshots being created as part of snapshot collection.
+ allow_writes:
+ required: False
+ type: bool
+ description:
+ - Allow applications to write to created snapshot(s). Mandatory and must be set to 'true' for VSS application synchronized snapshots.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change name of the existing snapshot collection.
+ description:
+ required: False
+ type: str
+ description:
+ - Text description of snapshot collection.
+ disable_appsync:
+ required: False
+ type: bool
+ description:
+ - Do not perform application synchronization for this snapshot. Create a crash-consistent snapshot instead.
+ expiry_after:
+ required: False
+ type: int
+ description:
+ - Number of seconds after which this snapcoll is considered expired by the snapshot TTL. A value of 0 indicates that the snapshot
+ never expires, 1 indicates that the snapshot uses a group-level configured TTL value and any other value indicates the number of seconds.
+ force:
+ required: False
+ type: bool
+ description:
+ - Forcibly delete the specified snapshot collection even if it is the last replicated snapshot. Doing so could lead to full re-seeding at the
+ next replication.
+ invoke_on_upstream_partner:
+ required: False
+ type: bool
+ description:
+ - Invoke snapshot request on upstream partner. This operation is not supported for synchronous replication volume collections.
+ is_external_trigger:
+ required: False
+ type: bool
+ description:
+ - Is externally triggered.
+ metadata:
+ required: False
+ type: dict
+ description:
+    - Key-value pairs that augment a snapshot collection's attributes. List of key-value pairs. Keys must be unique and non-empty.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the snapshot collection.
+ replicate_to:
+ required: False
+ type: str
+ description:
+ - Specifies the partner name that the snapshots in this snapshot collection are replicated to.
+ skip_db_consistency_check:
+ required: False
+ type: bool
+ description:
+ - Skip consistency check for database files on this snapshot. This option only applies to volume collections with application
+ synchronization set to VSS, application ID set to MS Exchange 2010 or later with Database Availability Group (DAG), snap_verify option
+ set to true, and disable_appsync option set to false.
+ snap_verify:
+ required: False
+ type: bool
+ description:
+ - Run verification tool on this snapshot. This option can only be used with a volume collection that has application synchronization.
+ start_online:
+ required: False
+ type: bool
+ description:
+ - Start with snapshot set online.
+ state:
+ required: True
+ choices:
+ - present
+ - absent
+ - create
+ type: str
+ description:
+ - The snapshot collection operation.
+ vol_snap_attr_list:
+ required: False
+ type: list
+ elements: dict
+ description:
+ - List of snapshot attributes for snapshots being created as part of snapshot collection creation. List of volumes with per snapshot attributes.
+ volcoll:
+ required: True
+ type: str
+ description:
+ - Parent volume collection name.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage snapshot collections
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create a snapshot collection if not present. Fails if already present.
+# if state is present, then create a snapshot collection if not present. Succeeds if it already exists.
+- name: Create snapshot collection if not present
+ hpe.nimble.hpe_nimble_snapshot_collection:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: "{{ state | default('present') }}"
+ name: "{{ name | mandatory}}"
+ volcoll: "{{ volcoll | mandatory}}"
+ description: "{{ description }}"
+
+- name: Delete snapshot collection (must be offline)
+ hpe.nimble.hpe_nimble_snapshot_collection:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ volcoll: "{{ volcoll }}"
+ state: absent
+
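+# Added illustrative sketch (not part of the original examples): taking an
+# application-synchronized snapshot collection with verification via the
+# snap_verify option documented above.
+- name: Create verified snapshot collection
+  hpe.nimble.hpe_nimble_snapshot_collection:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    volcoll: "{{ volcoll }}"
+    snap_verify: true
+    state: create
+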
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_snapcoll(
+ client_obj,
+ snapcoll_name,
+ volcoll_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(snapcoll_name):
+ return (False, False, "Create snapshot collection failed. snapshot collection name is not present.", {}, {})
+ try:
+ snapcoll_resp = client_obj.snapshot_collections.get(id=None, name=snapcoll_name, volcoll_name=volcoll_name)
+ if utils.is_null_or_empty(snapcoll_resp):
+ params = utils.remove_null_args(**kwargs)
+ snapcoll_resp = client_obj.snapshot_collections.create(name=snapcoll_name, **params)
+ return (True, True, f"Created snapshot collection '{snapcoll_name}' for volume collection '{volcoll_name}' successfully.", {}, snapcoll_resp.attrs)
+ else:
+ return (False, False, f"Snapshot collection '{snapcoll_name}' for volume collection '{volcoll_name}' cannot be created"
+ "as it is already present in given state.", {}, snapcoll_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Snapshot collection creation failed | {ex}", {}, {})
+
+
+def update_snapcoll(
+ client_obj,
+ snapcoll_resp,
+ **kwargs):
+
+ if utils.is_null_or_empty(snapcoll_resp):
+ return (False, False, "Update snapshot collection failed as snapshot collection is not present.", {}, {})
+ try:
+ snapcoll_name = snapcoll_resp.attrs.get("name")
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(snapcoll_resp, **kwargs)
+        if len(changed_attrs_dict) > 0:
+ snapcoll_resp = client_obj.snapshot_collections.update(id=snapcoll_resp.attrs.get("id"), **params)
+ return (True, True, f"Snapshot collection '{snapcoll_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, snapcoll_resp.attrs)
+ else:
+ return (True, False, f"Snapshot collection '{snapcoll_name}' already present in given state.", {}, snapcoll_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Snapshot collection update failed | {ex}", {}, {})
+
+
+def delete_snapcoll(client_obj, snapcoll_name, volcoll_name):
+
+ if utils.is_null_or_empty(snapcoll_name):
+ return (False, False, "Snapshot collection deletion failed as snapshot collection name is not present.", {})
+
+ try:
+ snapcoll_resp = client_obj.snapshot_collections.get(id=None, name=snapcoll_name, volcoll_name=volcoll_name)
+ if utils.is_null_or_empty(snapcoll_resp):
+ return (False, False, f"Snapshot collection '{snapcoll_name}' for volume collection '{volcoll_name}' not present to delete.", {})
+ else:
+ client_obj.snapshot_collections.delete(id=snapcoll_resp.attrs.get("id"))
+ return (True, True, f"Snapshot collection '{snapcoll_name}' for volume collection '{volcoll_name}' deleted successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Snapshot collection deletion failed | {ex}", {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['present',
+ 'absent',
+ 'create'
+ ],
+ "type": "str"
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "volcoll": {
+ "required": True,
+ "type": "str"
+ },
+ "is_external_trigger": {
+ "required": False,
+ "type": "bool"
+ },
+ "vol_snap_attr_list": {
+ "required": False,
+ "type": "list",
+ "elements": 'dict'
+ },
+ "replicate_to": {
+ "required": False,
+ "type": "str"
+ },
+ "start_online": {
+ "required": False,
+ "type": "bool"
+ },
+ "allow_writes": {
+ "required": False,
+ "type": "bool"
+ },
+ "disable_appsync": {
+ "required": False,
+ "type": "bool"
+ },
+ "snap_verify": {
+ "required": False,
+ "type": "bool"
+ },
+ "skip_db_consistency_check": {
+ "required": False,
+ "type": "bool"
+ },
+ "invoke_on_upstream_partner": {
+ "required": False,
+ "type": "bool"
+ },
+ "agent_type": {
+ "required": False,
+ "type": "str"
+ },
+ "metadata": {
+ "required": False,
+ "type": "dict"
+ },
+ "expiry_after": {
+ "required": False,
+ "type": "int"
+ },
+ "force": {
+ "required": False,
+ "type": "bool"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+
+ module = AnsibleModule(argument_spec=fields)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ snapcoll_name = module.params["name"]
+ change_name = module.params["change_name"]
+ description = module.params["description"]
+ volcoll = module.params["volcoll"]
+ is_external_trigger = module.params["is_external_trigger"]
+ vol_snap_attr_list = module.params["vol_snap_attr_list"]
+ replicate_to = module.params["replicate_to"]
+ start_online = module.params["start_online"]
+ allow_writes = module.params["allow_writes"]
+ disable_appsync = module.params["disable_appsync"]
+ snap_verify = module.params["snap_verify"]
+ skip_db_consistency_check = module.params["skip_db_consistency_check"]
+ invoke_on_upstream_partner = module.params["invoke_on_upstream_partner"]
+ agent_type = module.params["agent_type"]
+ metadata = module.params["metadata"]
+ expiry_after = module.params["expiry_after"]
+ force = module.params["force"]
+
+ if (username is None or password is None or hostname is None or snapcoll_name is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username, password and snapshot collection name is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "create" or state == "present":
+ snapcoll_resp = client_obj.snapshot_collections.get(id=None, name=snapcoll_name, volcoll_name=volcoll)
+ if utils.is_null_or_empty(snapcoll_resp) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_snapcoll(
+ client_obj,
+ snapcoll_name,
+ volcoll,
+ description=description,
+ volcoll_id=utils.get_volcoll_id(client_obj, volcoll),
+ is_external_trigger=is_external_trigger,
+ vol_snap_attr_list=vol_snap_attr_list,
+ replicate_to=replicate_to,
+ start_online=start_online,
+ allow_writes=allow_writes,
+ disable_appsync=disable_appsync,
+ snap_verify=snap_verify,
+ skip_db_consistency_check=skip_db_consistency_check,
+ invoke_on_upstream_partner=invoke_on_upstream_partner,
+ agent_type=agent_type,
+ metadata=metadata)
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_snapcoll(
+ client_obj,
+ snapcoll_resp,
+ name=change_name,
+ description=description,
+ replicate_to=replicate_to,
+ expiry_after=expiry_after,
+ metadata=metadata,
+ force=force)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_snapcoll(client_obj,
+ snapcoll_name,
+ volcoll)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_user.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_user.py
new file mode 100644
index 00000000..8deb733c
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_user.py
@@ -0,0 +1,381 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the users on an HPE Nimble Storage group.
+module: hpe_nimble_user
+options:
+ auth_password:
+ required: False
+ type: str
+ description:
+ - Authorization password for changing password.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change name of the existing user.
+ description:
+ required: False
+ type: str
+ description:
+ - Description of the user.
+ disabled:
+ required: False
+ type: bool
+ description:
+ - User is currently disabled.
+ email_addr:
+ required: False
+ type: str
+ description:
+ - Email address of the user.
+ full_name:
+ required: False
+ type: str
+ description:
+ - Fully qualified name of the user.
+ inactivity_timeout:
+ required: False
+ type: int
+ default: 0
+ description:
+ - The amount of time that the user session is inactive before timing out. A value of 0 indicates that the timeout is taken from the group setting.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the user.
+ user_password:
+ required: False
+ type: str
+ description:
+ - User's login password.
+ role:
+ required: False
+ choices:
+ - administrator
+ - poweruser
+ - operator
+ - guest
+ type: str
+ description:
+ - Role of the user. Default is 'guest'.
+ state:
+ required: True
+ choices:
+ - create
+ - present
+ - absent
+ type: str
+ description:
+ - The user operation.
+ unlock:
+ required: False
+ type: bool
+ description:
+ - Unlock the user.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage users
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# if state is create, then create user. Fails if it exists or cannot be created.
+# if state is present, then create user if not present, otherwise succeed.
+- name: Create user
+ hpe.nimble.hpe_nimble_user:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ description: "{{ description }}"
+ state: "{{ state | default('present') }}"
+
+- name: Delete user
+ hpe.nimble.hpe_nimble_user:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: "absent"
+
+- name: Unlock user
+ hpe.nimble.hpe_nimble_user:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: "present"
+ unlock: true
+
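+# Added illustrative sketch (not part of the original examples): changing an
+# existing user's password. 'new_password' is an assumed variable; auth_password
+# authorizes the change per the option description above.
+- name: Change user password
+  hpe.nimble.hpe_nimble_user:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    user_password: "{{ new_password }}"
+    auth_password: "{{ password }}"
+    state: present
+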
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_user(
+ client_obj,
+ user_name,
+ password,
+ **kwargs):
+
+ if utils.is_null_or_empty(user_name):
+ return (False, False, "Create user failed as user is not present.", {}, {})
+ if utils.is_null_or_empty(password):
+ return (False, False, "Create user failed as password is not present.", {}, {})
+
+ try:
+ user_resp = client_obj.users.get(id=None, name=user_name)
+ if utils.is_null_or_empty(user_resp):
+ params = utils.remove_null_args(**kwargs)
+ user_resp = client_obj.users.create(name=user_name, password=password, **params)
+ return (True, True, f"User '{user_name}' created successfully.", {}, user_resp.attrs)
+ else:
+ return (False, False, f"User '{user_name}' cannot be created as it is already present in given state.", {}, {})
+ except Exception as ex:
+ return (False, False, f"User creation failed | {ex}", {}, {})
+
+
+def update_user(
+ client_obj,
+ user_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(user_name):
+ return (False, False, "Update user failed as user is not present.", {}, {})
+
+ try:
+ user_resp = client_obj.users.get(id=None, name=user_name)
+ if utils.is_null_or_empty(user_resp):
+ return (False, False, f"User '{user_name}' cannot be updated as it is not present.", {}, {})
+
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(user_resp, **kwargs)
+        if len(changed_attrs_dict) > 0:
+ user_resp = client_obj.users.update(id=user_resp.attrs.get("id"), **params)
+ return (True, True, f"User '{user_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, user_resp.attrs)
+ else:
+ return (True, False, f"User '{user_resp.attrs.get('name')}' already present in given state.", {}, user_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"User update failed | {ex}", {}, {})
+
+
+def delete_user(
+ client_obj,
+ user_name):
+
+ if utils.is_null_or_empty(user_name):
+ return (False, False, "Delete user failed as user is not present.", {})
+
+ try:
+ user_resp = client_obj.users.get(id=None, name=user_name)
+ if utils.is_null_or_empty(user_resp):
+ return (False, False, f"User '{user_name}' cannot be deleted as it is not present.", {})
+
+ client_obj.users.delete(id=user_resp.attrs.get("id"))
+ return (True, True, f"Deleted user '{user_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Delete user failed | {ex}", {})
+
+
+def unlock_user(
+ client_obj,
+ user_name):
+
+ if utils.is_null_or_empty(user_name):
+ return (False, False, "Unlock user failed as user name is not present.", {})
+
+ try:
+ user_resp = client_obj.users.get(id=None, name=user_name)
+ if utils.is_null_or_empty(user_resp):
+ return (False, False, f"User '{user_name}' cannot be unlocked as it is not present.", {})
+
+ client_obj.users.unlock(id=user_resp.attrs.get("id"))
+ return (True, True, f"Unlocked user '{user_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Unlock user failed | {ex}", {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['create',
+ 'present',
+ 'absent'
+ ],
+ "type": "str"
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "role": {
+ "required": False,
+ "choices": ['administrator',
+ 'poweruser',
+ 'operator',
+ 'guest'
+ ],
+ "type": "str"
+ },
+ "user_password": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "inactivity_timeout": {
+ "required": False,
+ "type": "int"
+ },
+ "full_name": {
+ "required": False,
+ "type": "str"
+ },
+ "email_addr": {
+ "required": False,
+ "type": "str"
+ },
+ "disabled": {
+ "required": False,
+ "type": "bool"
+ },
+ "auth_password": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "unlock": {
+ "required": False,
+ "type": "bool"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
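+    # When state is 'create', user_password must be supplied up front; with
+    # 'present', a password is only needed if the user does not already exist.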
+ required_if = [('state', 'create', ['user_password'])]
+
+ module = AnsibleModule(argument_spec=fields, required_if=required_if)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ user_name = module.params["name"]
+ change_name = module.params["change_name"]
+ description = module.params["description"]
+ role = module.params["role"]
+ user_password = module.params["user_password"]
+ inactivity_timeout = module.params["inactivity_timeout"]
+ full_name = module.params["full_name"]
+ email_addr = module.params["email_addr"]
+ disabled = module.params["disabled"]
+ auth_password = module.params["auth_password"]
+ unlock = module.params["unlock"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Missing variables: hostname, username and password is mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
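+        # Unlock is handled as a separate 'present' operation further below;
+        # create/update only run when unlock was not requested.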
+ if ((unlock is None or unlock is False) and (state == "create" or state == "present")):
+ if not client_obj.users.get(id=None, name=user_name) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_user(
+ client_obj,
+ user_name,
+ user_password,
+ description=description,
+ role=role,
+ inactivity_timeout=inactivity_timeout,
+ full_name=full_name,
+ email_addr=email_addr,
+ disabled=disabled)
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_user(
+ client_obj,
+ user_name,
+ name=change_name,
+ password=user_password,
+ description=description,
+ role=role,
+ inactivity_timeout=inactivity_timeout,
+ full_name=full_name,
+ email_addr=email_addr,
+ disabled=disabled,
+ auth_password=auth_password)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_user(client_obj, user_name)
+
+ elif state == "present" and unlock is True:
+ return_status, changed, msg, changed_attrs_dict = unlock_user(client_obj, user_name)
+
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_user_policy.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_user_policy.py
new file mode 100644
index 00000000..a81b2092
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_user_policy.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the user policies on an HPE Nimble Storage group.
+module: hpe_nimble_user_policy
+options:
+ allowed_attempts:
+ required: False
+ type: int
+ description:
+ - Number of authentication attempts allowed before the user account is locked. Allowed range is [1, 10] inclusive. '0' indicates no limit.
+ digit:
+ required: False
+ type: int
+ description:
+ - Number of numerical characters required in user passwords. Allowed range is [0, 255] inclusive.
+ lower:
+ required: False
+ type: int
+ description:
+ - Number of lowercase characters required in user passwords. Allowed range is [0, 255] inclusive.
+ max_sessions:
+ required: False
+ type: int
+ description:
+ - Maximum number of sessions allowed for a group. Allowed range is [10, 1000] inclusive.
+ min_length:
+ required: False
+ type: int
+ description:
+ - Minimum length for user passwords. Allowed range is [8, 255] inclusive.
+ no_reuse:
+ required: False
+ type: int
+ description:
+ - Number of times that a password must change before you can reuse a previous password. Allowed range is [1,20] inclusive.
+ previous_diff:
+ required: False
+ type: int
+ description:
+ - Number of characters that must be different from the previous password. Allowed range is [1, 255] inclusive.
+ special:
+ required: False
+ type: int
+ description:
+ - Number of special characters required in user passwords. Allowed range is [0, 255] inclusive.
+ state:
+ required: True
+ choices:
+ - present
+ type: str
+ description:
+ - The user policy operation.
+ upper:
+ required: False
+ type: int
+ description:
+ - Number of uppercase characters required in user passwords. Allowed range is [0, 255] inclusive.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage user policies
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+- name: Update user policy
+ hpe.nimble.hpe_nimble_user_policy:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ upper: "{{ upper }}"
+ allowed_attempts: 2
+ min_length: 10
+ state: "present"
+
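+# Added illustrative sketch (not part of the original examples): enforcing a
+# password-complexity baseline; the numeric values are assumptions chosen from
+# the documented ranges.
+- name: Enforce password complexity baseline
+  hpe.nimble.hpe_nimble_user_policy:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    min_length: 12
+    upper: 1
+    lower: 1
+    digit: 1
+    special: 1
+    no_reuse: 5
+    state: present
+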
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def update_user_policy(
+ client_obj,
+ **kwargs):
+
+ try:
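+        # The group exposes a single user policy, so get() needs no id or
+        # name filter here (an assumption based on the parameterless call).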
+ user_resp = client_obj.user_policies.get()
+ if utils.is_null_or_empty(user_resp):
+ return (False, False, "User policy is not present on Array", {}, {})
+
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(user_resp, **kwargs)
+        if len(changed_attrs_dict) > 0:
+            user_resp = client_obj.user_policies.update(id=user_resp.attrs.get("id"), **params)
+            return (True, True, f"Updated user policy successfully with the following attributes '{changed_attrs_dict}'.", changed_attrs_dict, user_resp.attrs)
+        else:
+            return (True, False, "User policy already present in given state.", {}, user_resp.attrs)
+    except Exception as ex:
+        return (False, False, f"User policy update failed | {ex}", {}, {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['present'
+ ],
+ "type": 'str'
+ },
+ "allowed_attempts": {
+ "required": False,
+ "type": "int"
+ },
+ "min_length": {
+ "required": False,
+ "type": "int"
+ },
+ "upper": {
+ "required": False,
+ "type": "int"
+ },
+ "lower": {
+ "required": False,
+ "type": "int"
+ },
+ "digit": {
+ "required": False,
+ "type": "int"
+ },
+ "special": {
+ "required": False,
+ "type": "int"
+ },
+ "previous_diff": {
+ "required": False,
+ "type": "int"
+ },
+ "no_reuse": {
+ "required": False,
+ "type": "int"
+ },
+ "max_sessions": {
+ "required": False,
+ "type": "int"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ module = AnsibleModule(argument_spec=fields)
+ if client is None:
+        module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ allowed_attempts = module.params["allowed_attempts"]
+ min_length = module.params["min_length"]
+ upper = module.params["upper"]
+ lower = module.params["lower"]
+ digit = module.params["digit"]
+ special = module.params["special"]
+ previous_diff = module.params["previous_diff"]
+ no_reuse = module.params["no_reuse"]
+ max_sessions = module.params["max_sessions"]
+
+ if (username is None or password is None or hostname is None):
+ module.fail_json(
+ msg="Storage system IP or username or password is null")
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States
+ if state == "present":
+ return_status, changed, msg, changed_attrs_dict, resp = update_user_policy(
+ client_obj,
+ allowed_attempts=allowed_attempts,
+ min_length=min_length,
+ upper=upper,
+ lower=lower,
+ digit=digit,
+ special=special,
+ previous_diff=previous_diff,
+ no_reuse=no_reuse,
+ max_sessions=max_sessions)
+
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_volume.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_volume.py
new file mode 100644
index 00000000..87d76ec2
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_volume.py
@@ -0,0 +1,843 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the volumes on an HPE Nimble Storage group.
+module: hpe_nimble_volume
+options:
+ agent_type:
+ required: False
+ choices:
+ - none
+ - smis
+ - vvol
+ - openstack
+ - openstackv2
+ type: str
+ description:
+ - External management agent type.
+ app_uuid:
+ required: False
+ type: str
+ description:
+ - Application identifier of volume.
+ block_size:
+ required: False
+ type: int
+ description:
+ - Size in bytes of blocks in the volume.
+ cache_pinned:
+ required: False
+ type: bool
+ description:
+ - If set to true, all the contents of this volume are kept in flash cache.
+ caching:
+ required: False
+ type: bool
+ description:
+    - Indicates whether caching for the volume is enabled.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change name of the existing source volume.
+ clone:
+ required: False
+ type: bool
+ description:
+ - Whether this volume is a clone. Use this attribute in combination with name and snapshot to create a clone by setting clone = true.
+ dedupe:
+ required: False
+ type: bool
+ description:
+    - Indicates whether dedupe is enabled.
+ description:
+ required: False
+ type: str
+ default: null
+ description:
+ - Text description of volume.
+ destination:
+ required: False
+ type: str
+ description:
+    - Name of the destination pool to which the volume is moving.
+ encryption_cipher:
+ required: False
+ choices:
+ - none
+ - aes_256_xts
+ type: str
+ description:
+ - The encryption cipher of the volume.
+ folder:
+ required: False
+ type: str
+ description:
+ - Name of the folder holding this volume.
+ force:
+ required: False
+ type: bool
+ description:
+    - Forcibly offline a volume, reduce its size, or change its read-only status.
+ force_vvol:
+ required: False
+ type: bool
+ default: False
+ description:
+ - Forcibly move a virtual volume.
+ iscsi_target_scope:
+ required: False
+ type: str
+ choices:
+ - volume
+ - group
+ description:
+    - This indicates whether the volume is exported under iSCSI Group Target or iSCSI volume target. This attribute is only meaningful to iSCSI systems.
+ limit:
+ required: False
+ type: int
+ description:
+ - Limit on the volume's mapped usage, expressed as a percentage of the volume's size.
+ limit_iops:
+ required: False
+ type: int
+ description:
+ - IOPS limit for this volume.
+ limit_mbps:
+ required: False
+ type: int
+ description:
+ - Throughput limit for this volume in MB/s.
+ metadata:
+ required: False
+ type: dict
+ description:
+    - User defined key-value pairs that augment a volume's attributes. List of key-value pairs. Keys must be unique and non-empty.
+ When creating an object, values must be non-empty. When updating an object, an empty value causes the corresponding key to be removed.
+ move:
+ required: False
+ type: bool
+ description:
+    - Move a volume to a different pool.
+ multi_initiator:
+ required: False
+ type: bool
+ description:
+ - For iSCSI volume target, this flag indicates whether the volume and its snapshots can be accessed from multiple initiators at the same time.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the source volume.
+ online:
+ required: False
+ type: bool
+ description:
+ - Online state of volume, available for host initiators to establish connections.
+ owned_by_group:
+ required: False
+ type: str
+ description:
+ - Name of group that currently owns the volume.
+ parent:
+ required: False
+ type: str
+ description:
+ - Name of parent volume.
+ perf_policy:
+ required: False
+ type: str
+ default: null
+ description:
+    - Name of the performance policy. After creating a volume, the performance policy for the volume can only be
+      changed to another performance policy with the same block size.
+ pool:
+ required: False
+ type: str
+ description:
+ - Name associated with the pool in the storage pool table.
+ read_only:
+ required: False
+ type: bool
+ description:
+ - Volume is read-only.
+ size:
+ type: int
+ description:
+ - Volume size in megabytes. Size is required for creating a volume but not for cloning an existing volume.
+ snapshot:
+ required: False
+ type: str
+ description:
+ - Base snapshot name. This attribute is required together with name and clone when cloning a volume with the create operation.
+ state:
+ description:
+ - The volume operations.
+ choices:
+ - present
+ - absent
+ - create
+ - restore
+ required: True
+ type: str
+ thinly_provisioned:
+ required: False
+ type: bool
+ description:
+ - Set volume's provisioning level to thin.
+ volcoll:
+ required: False
+ type: str
+ description:
+ - Name of volume collection of which this volume is a member. Use this attribute in update operation to associate or dissociate volumes with or from
+ volume collections. When associating, set this attribute to the name of the volume collection. When dissociating, set this attribute to empty string.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage volumes
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# If state is "create", then create a volume if not present. Fails if already present.
+# If state is "present", then create a volume if not present. Succeeds if it already exists.
+- name: Create volume if not present
+ hpe.nimble.hpe_nimble_volume:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: "{{ state | default('present') }}"
+ size: "{{ size }}"
+ limit_iops: "{{ limit_iops }}"
+ limit_mbps: 5000
+ force: false
+ metadata: "{{ metadata }}" # metadata = {'mykey1': 'myval1', 'mykey2': 'myval2'}
+ description: "{{ description }}"
+ name: "{{ name }}"
+
+- name: Changing volume "{{ name }}" to offline state
+ hpe.nimble.hpe_nimble_volume:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ online: False
+ state: present
+ name: "{{ name }}"
+
+- name: Changing volume "{{ name }}" to online state
+ hpe.nimble.hpe_nimble_volume:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ online: True
+ state: present
+ name: "{{ name }}"
+
+# Create a clone from the given snapshot name.
+# If a snapshot name is not provided, a temporary snapshot is created on the source volume first.
+# The clone task only runs if "parent" is specified. Snapshot is optional.
+- name: Create or Refresh a clone
+ hpe.nimble.hpe_nimble_volume:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}" # name here is the name of cloned volume
+ parent: "{{ parent | mandatory }}"
+    snapshot: "{{ snapshot | default(None) }}"
+ state: "{{ state | default('present') }}"
+ when:
+ - parent is defined
+
+- name: Destroy volume (must be offline)
+ hpe.nimble.hpe_nimble_volume:
+ name: "{{ name }}"
+ state: absent
+
+# Restore the volume from the specified snapshot.
+# The snapshot parameter is required when state is "restore"; restore_volume() itself
+# falls back to the volume's most recent snapshot if none is supplied.
+- name: Restore volume "{{ name }}"
+ hpe.nimble.hpe_nimble_volume:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+    snapshot: "{{ snapshot | mandatory }}"
+ state: restore
+
+- name: Delete volume "{{ name }}" (must be offline)
+ hpe.nimble.hpe_nimble_volume:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: absent
+
+- name: Move volume to pool
+ hpe.nimble.hpe_nimble_volume:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ move: true
+ name: "{{ name }}"
+ state: present
+ destination: "{{ destination | mandatory }}"
+
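+# A minimal sketch of managing volume collection membership through the volcoll
+# attribute, assuming the volume collection named by "volcoll" already exists.
+# Setting volcoll to an empty string dissociates the volume from its volume collection.
+- name: Associate volume with volume collection
+  hpe.nimble.hpe_nimble_volume:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    volcoll: "{{ volcoll }}"
+    state: present
+
+- name: Dissociate volume from volume collection
+  hpe.nimble.hpe_nimble_volume:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    volcoll: ""
+    state: present
+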
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+ from nimbleclient import exceptions
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+from enum import Enum
+
+
+class Vol_Operation(Enum):
+ SUCCESS = 0
+ ALREADY_EXISTS = -1
+ FAILED = 1
+
+# util functions
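+# Each helper below returns a tuple of
+# (return_status, changed, msg, changed_attrs_dict[, resp]) that main() maps to
+# module.exit_json()/fail_json(). create_clone_from_snapshot() instead reports a
+# Vol_Operation status so callers can distinguish "already exists" from failure.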
+
+
+def move_volume(
+ client_obj,
+ vol_name,
+ dest_pool,
+ force_vvol):
+
+ if utils.is_null_or_empty(vol_name):
+ return (False, False, "Volume move failed as volume name is null.", {}, {})
+
+ if utils.is_null_or_empty(dest_pool):
+ return (False, False, "Volume move failed as destination pool is null.", {}, {})
+ try:
+ vol_resp = client_obj.volumes.get(id=None, name=vol_name)
+ if utils.is_null_or_empty(vol_resp):
+ return (False, False, f"Volume '{vol_name}' not present to move.", {}, {})
+
+ resp = client_obj.volumes.move(id=vol_resp.attrs.get("id"), dest_pool_id=utils.get_pool_id(client_obj, dest_pool), force_vvol=force_vvol)
+ return (True, True, f"Volume '{vol_resp.attrs.get('name')}' moved successfully.", {}, resp)
+ except Exception as ex:
+ return (False, False, f"Volume move failed | '{ex}'", {}, {})
+
+
+def update_volume(
+ client_obj,
+ vol_resp,
+ **kwargs):
+
+ if utils.is_null_or_empty(vol_resp):
+ return (False, False, "Invalid volume to update.", {}, {})
+ try:
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(vol_resp, **kwargs)
+
+ if 'volcoll_name' in kwargs:
+ if kwargs['volcoll_name'] == "" and vol_resp.attrs.get('volcoll_id') != "":
+ params['volcoll_id'] = ""
+ changed_attrs_dict['volcoll_id'] = ""
+ else:
+ if 'volcoll_name' in params:
+ params.pop('volcoll_name')
+ changed_attrs_dict.pop('volcoll_name')
+
+        if len(changed_attrs_dict) > 0:
+ resp = client_obj.volumes.update(id=vol_resp.attrs.get("id"), **params)
+ return (True, True, f"Volume '{vol_resp.attrs.get('name')}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, resp.attrs)
+ else:
+ return (True, False, f"Volume '{vol_resp.attrs.get('name')}' already present in given state.", {}, vol_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Volume update failed '{ex}'", {}, {})
+
+
+def create_volume(
+ client_obj,
+ vol_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(vol_name):
+ return (False, False, "Volume creation failed as volume name is null.", {}, {})
+
+ try:
+ vol_resp = client_obj.volumes.get(id=None, name=vol_name)
+ # remove unchanged and null arguments from kwargs
+ params = utils.remove_null_args(**kwargs)
+ if utils.is_null_or_empty(vol_resp):
+ resp = client_obj.volumes.create(vol_name, **params)
+ return (True, True, f"Created volume '{vol_name}' successfully.", {}, resp.attrs)
+ else:
+ return (False, False, f"Volume '{vol_name}' cannot be created as it is already present in given state.", {}, {})
+ except Exception as ex:
+ return (False, False, f"Volume creation failed '{ex}'", {}, {})
+
+
+def delete_volume(client_obj, vol_name):
+ if utils.is_null_or_empty(vol_name):
+ return (False, False, "Volume deletion failed as volume name is null.", {})
+
+ try:
+ vol_resp = client_obj.volumes.get(id=None, name=vol_name)
+ if utils.is_null_or_empty(vol_resp):
+ return (False, False, f"Volume '{vol_name}' not present to delete.", {})
+ else:
+ client_obj.volumes.delete(id=vol_resp.attrs.get("id"))
+ return (True, True, f"Deleted volume '{vol_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Volume deletion for {vol_name} failed '{ex}'", {})
+
+
+def restore_volume(client_obj, vol_name, snapshot_to_restore=None):
+ if utils.is_null_or_empty(vol_name):
+ return (False, False, "Volume restore failed as volume name is null.", {}, {})
+ try:
+ vol_resp = client_obj.volumes.get(id=None, name=vol_name)
+ if utils.is_null_or_empty(vol_resp):
+ return (False, False, f"Volume '{vol_name}' not present to restore.", {}, {})
+
+ if utils.is_null_or_empty(snapshot_to_restore):
+ # restore the volume to the last snapshot
+ snap_list_resp = client_obj.snapshots.list(vol_name=vol_name)
+ if utils.is_null_or_empty(snap_list_resp):
+ return (False, False, f"Volume '{vol_name}' cannot be restored as no snapshot is present in source volume.", {}, {})
+ snap_resp = snap_list_resp[-1]
+ snapshot_to_restore = snap_resp.attrs.get("name")
+ else:
+ # get the snapshot detail from the given source vol
+ snap_resp = client_obj.snapshots.get(vol_name=vol_name, name=snapshot_to_restore)
+ if utils.is_null_or_empty(snap_resp):
+                return (False, False, f"Volume '{vol_name}' cannot be restored as given snapshot name '{snapshot_to_restore}' is not present in "
+                        "source volume.", {}, {})
+
+ # offline and restore
+ client_obj.volumes.offline(id=vol_resp.attrs.get("id"))
+ resp = client_obj.volumes.restore(base_snap_id=snap_resp.attrs.get("id"),
+ id=vol_resp.attrs.get("id"))
+ # bring volume online
+ client_obj.volumes.online(id=vol_resp.attrs.get("id"))
+ return (True, True, f"Restored volume '{vol_name}' from snapshot '{snapshot_to_restore}' successfully.", {}, resp)
+ except Exception as ex:
+ return (False, False, f"Volume restore failed '{ex}'", {}, {})
+
+
+# Given a snapshot name, create a clone.
+# return code
+# SUCCESS = 0
+# ALREADY_EXISTS = -1
+# FAILED = 1
+def create_clone_from_snapshot(
+ client_obj,
+ snap_list_resp,
+ vol_name,
+ snapshot_to_clone,
+ state):
+    if (utils.is_null_or_empty(client_obj)
+            or utils.is_null_or_empty(vol_name)
+            or utils.is_null_or_empty(snap_list_resp)
+            or utils.is_null_or_empty(snapshot_to_clone)):
+        # return the Vol_Operation status tuple expected by callers, not a bare bool
+        return (Vol_Operation.FAILED, "Create clone from snapshot failed as valid arguments are not provided. Please check the arguments provided for volume and snapshot.", {})
+ try:
+        # find the snapshot in the given snapshot list
+ for snap_obj in snap_list_resp:
+ if snap_obj.attrs.get("name") == snapshot_to_clone:
+ # create
+ resp = client_obj.volumes.create(name=vol_name,
+ base_snap_id=snap_obj.attrs.get("id"),
+ clone=True)
+ if utils.is_null_or_empty(resp) is False:
+ return (Vol_Operation.SUCCESS, f'{vol_name}', resp.attrs)
+        # snapshot not found in the list; return the full (status, msg, resp) tuple expected by callers
+        return (Vol_Operation.FAILED, f"Snapshot '{snapshot_to_clone}' is not present in the given snapshot list.", {})
+ except exceptions.NimOSAPIError as ex:
+ if "SM_eexist" in str(ex):
+ # check the state. if it set to present then return true
+            # check the state. if it is set to present then report success
+ return (Vol_Operation.ALREADY_EXISTS, f"Cloned volume '{vol_name}' is already present in given state.", {})
+ else:
+                return (Vol_Operation.FAILED, f"Create clone from snapshot failed as cloned volume '{vol_name}' already exists | {ex}", {})
+ except Exception as ex:
+ return (Vol_Operation.FAILED, f"Create clone from snapshot failed | {ex}", {})
+
+
+def clone_volume(
+ client_obj,
+ parent,
+ state,
+ vol_name=None,
+ snapshot_to_clone=None):
+
+ if utils.is_null_or_empty(vol_name):
+ return (False, False, "Clone operation failed. Clone volume name is not present.", {}, {})
+    # this function handles 2 scenarios:
+    # if a snapshot name is given, we try to clone from it; otherwise
+    # we first create a snapshot of the source volume and then
+    # clone from that snapshot
+ try:
+ if utils.is_null_or_empty(snapshot_to_clone):
+ if utils.is_null_or_empty(parent):
+ return (False, False, "Clone operation failed. Parent volume name is not present.", {}, {})
+ # get the vol id
+ vol_resp = client_obj.volumes.get(name=parent)
+ if utils.is_null_or_empty(vol_resp):
+ return (False, False, "Clone operation failed. Parent volume name is not present.", {}, {})
+ else:
+ # create a temp snapshot
+ snapshot_to_clone = utils.get_unique_string("ansible-snapshot")
+ snap_resp = client_obj.snapshots.create(name=snapshot_to_clone,
+ vol_id=vol_resp.attrs.get("id"),
+ description="created by ansible",
+ online=False,
+ writable=False)
+ if utils.is_null_or_empty(snap_resp):
+                return (False, False, "Clone operation failed as a snapshot could not be created from the source volume.", {}, {})
+ # create clone
+ clonevol_resp, msg, resp = create_clone_from_snapshot(client_obj, [snap_resp], vol_name, snapshot_to_clone, state)
+ if clonevol_resp == Vol_Operation.ALREADY_EXISTS or clonevol_resp == Vol_Operation.FAILED:
+ # remove the snapshot
+ client_obj.snapshots.delete(id=snap_resp.attrs.get("id"))
+ else:
+ # get the snapshot detail from the given source vol
+ snap_list_resp = client_obj.snapshots.list(vol_name=parent, name=snapshot_to_clone)
+ if utils.is_null_or_empty(snap_list_resp):
+ return (False, False, f"Could not create clone volume '{vol_name}' as given snapshot name '{snapshot_to_clone}' is not present "
+ "in parent volume", {}, {})
+ # create clone
+ clonevol_resp, msg, resp = create_clone_from_snapshot(client_obj, snap_list_resp, vol_name, snapshot_to_clone, state)
+
+ if clonevol_resp is Vol_Operation.SUCCESS:
+ return (True, True, f"Successfully created cloned volume '{msg}'", {}, resp)
+ elif clonevol_resp is Vol_Operation.FAILED:
+ return (False, False, f"Failed to clone volume. Msg: '{msg}'", {}, {})
+        elif clonevol_resp is Vol_Operation.ALREADY_EXISTS:
+            return (True, False, msg, {}, {})
+ except Exception as ex:
+        return (False, False, f"Clone volume operation failed '{ex}'", {}, {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['present',
+ 'absent',
+ 'create',
+ 'restore'
+ ],
+ "type": "str"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "size": {
+ "type": "int"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "perf_policy": {
+ "required": False,
+ "type": "str"
+ },
+ "limit": {
+ "required": False,
+ "type": "int",
+ },
+ "online": {
+ "required": False,
+ "type": "bool"
+ },
+ "owned_by_group": {
+ "required": False,
+ "type": "str"
+ },
+ "multi_initiator": {
+ "required": False,
+ "type": "bool"
+ },
+ "iscsi_target_scope": {
+ "required": False,
+ "choices": ['volume', 'group'],
+ "type": "str"
+ },
+ "pool": {
+ "required": False,
+ "type": "str"
+ },
+ "read_only": {
+ "required": False,
+ "type": "bool"
+ },
+ "block_size": {
+ "required": False,
+ "type": "int"
+ },
+ "clone": {
+ "required": False,
+ "type": "bool"
+ },
+ "agent_type": {
+ "required": False,
+ "choices": ['none', 'smis', 'vvol', 'openstack', 'openstackv2'],
+ "type": "str"
+ },
+ "destination": {
+ "required": False,
+ "type": "str"
+ },
+ "cache_pinned": {
+ "required": False,
+ "type": "bool"
+ },
+ "thinly_provisioned": {
+ "required": False,
+ "type": "bool"
+ },
+ "encryption_cipher": {
+ "required": False,
+ "choices": ['none', 'aes_256_xts'],
+ "type": "str"
+ },
+ "app_uuid": {
+ "required": False,
+ "type": "str"
+ },
+ "folder": {
+ "required": False,
+ "type": "str",
+ },
+ "dedupe": {
+ "required": False,
+ "type": "bool"
+ },
+ "limit_iops": {
+ "required": False,
+ "type": "int"
+ },
+ "limit_mbps": {
+ "required": False,
+ "type": "int"
+ },
+ "parent": {
+ "required": False,
+ "type": "str"
+ },
+ "snapshot": {
+ "required": False,
+ "type": "str"
+ },
+ "volcoll": {
+ "required": False,
+ "type": "str"
+ },
+ "metadata": {
+ "required": False,
+ "type": "dict"
+ },
+ "force": {
+ "required": False,
+ "type": "bool"
+ },
+ "caching": {
+ "required": False,
+ "type": "bool"
+ },
+ "force_vvol": {
+ "required": False,
+ "type": "bool",
+ "default": False
+ },
+ "move": {
+ "required": False,
+ "type": "bool"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ required_if = [('state', 'restore', ['snapshot'])]
+
+ module = AnsibleModule(argument_spec=fields, required_if=required_if)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ state = module.params["state"]
+ vol_name = module.params["name"]
+ change_name = module.params["change_name"]
+ size = module.params["size"]
+ description = module.params["description"]
+ perf_policy = module.params["perf_policy"]
+ limit = module.params["limit"]
+ online = module.params["online"]
+ owned_by_group = module.params["owned_by_group"]
+ multi_initiator = module.params["multi_initiator"]
+ iscsi_target_scope = module.params["iscsi_target_scope"]
+ pool = module.params["pool"]
+ read_only = module.params["read_only"]
+ block_size = module.params["block_size"]
+ clone = module.params["clone"]
+ agent_type = module.params["agent_type"]
+ dest_pool = module.params["destination"]
+ cache_pinned = module.params["cache_pinned"]
+ thinly_provisioned = module.params["thinly_provisioned"]
+ encryption_cipher = module.params["encryption_cipher"]
+ app_uuid = module.params["app_uuid"]
+ folder = module.params["folder"]
+ dedupe = module.params["dedupe"]
+ limit_iops = module.params["limit_iops"]
+ limit_mbps = module.params["limit_mbps"]
+ parent = module.params["parent"] # used for cloning
+ snapshot = module.params["snapshot"]
+ volcoll = module.params["volcoll"]
+ metadata = module.params["metadata"]
+ force = module.params["force"]
+ caching = module.params["caching"]
+ force_vvol = module.params["force_vvol"]
+ move = module.params["move"]
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+
+ if (username is None or password is None or hostname is None):
+        module.fail_json(msg="Missing variables: hostname, username, and password are mandatory.")
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+ # States
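+        # Dispatch summary (mirrors the branches below):
+        #   move + present    -> move_volume()
+        #   create/present    -> clone_volume() when parent is set, otherwise
+        #                        create_volume() for a new volume or update_volume()
+        #   absent            -> delete_volume()
+        #   restore           -> restore_volume()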
+ if move is True and state == "present":
+ if utils.is_null_or_empty(dest_pool) is False:
+ return_status, changed, msg, changed_attrs_dict, resp = move_volume(client_obj, vol_name, dest_pool, force_vvol)
+ else:
+ module.fail_json(msg="Volume move failed as destination pool is null.")
+
+ elif (move is None or move is False) and (state == "create" or state == "present"):
+ if utils.is_null_or_empty(vol_name):
+ return_status = changed = False
+ msg = "Volume creation failed as volume name is null"
+
+ # state create/present can be provided for creating a new volume or
+ # creating a clone from source volume
+ if parent is not None:
+ return_status, changed, msg, changed_attrs_dict, resp = clone_volume(
+ client_obj, parent, state,
+ vol_name, snapshot)
+ else:
+ vol_resp = client_obj.volumes.get(id=None, name=vol_name)
+ if utils.is_null_or_empty(vol_resp) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_volume(
+ client_obj, vol_name,
+ perfpolicy_id=utils.get_perfpolicy_id(client_obj, perf_policy),
+ size=size,
+ description=description,
+ limit=limit,
+ online=online,
+ owned_by_group_id=utils.get_owned_by_group_id(client_obj, owned_by_group),
+ multi_initiator=multi_initiator,
+ iscsi_target_scope=iscsi_target_scope,
+ pool_id=utils.get_pool_id(client_obj, pool),
+ read_only=read_only,
+ block_size=block_size,
+ clone=clone,
+ agent_type=agent_type,
+ dest_pool_id=utils.get_pool_id(client_obj, dest_pool),
+ cache_pinned=cache_pinned,
+ thinly_provisioned=thinly_provisioned,
+ encryption_cipher=encryption_cipher,
+ app_uuid=app_uuid,
+ folder_id=utils.get_folder_id(client_obj, folder),
+ metadata=metadata,
+ dedupe_enabled=dedupe,
+ limit_iops=limit_iops,
+ limit_mbps=limit_mbps)
+ else:
+ return_status, changed, msg, changed_attrs_dict, resp = update_volume(
+ client_obj,
+ vol_resp,
+ name=change_name,
+ volcoll_name=volcoll,
+ size=size,
+ description=description,
+ perfpolicy_id=utils.get_perfpolicy_id(client_obj, perf_policy),
+ limit=limit,
+ online=online,
+ owned_by_group_id=utils.get_owned_by_group_id(client_obj, owned_by_group),
+ multi_initiator=multi_initiator,
+ iscsi_target_scope=iscsi_target_scope,
+ read_only=read_only,
+ block_size=block_size,
+ volcoll_id=utils.get_volcoll_id(client_obj, volcoll),
+ agent_type=agent_type,
+ force=force,
+ cache_pinned=cache_pinned,
+ thinly_provisioned=thinly_provisioned,
+ app_uuid=app_uuid,
+ folder_id=utils.get_folder_id(client_obj, folder),
+ metadata=metadata,
+ caching_enabled=caching,
+ dedupe_enabled=dedupe,
+ limit_iops=limit_iops,
+ limit_mbps=limit_mbps)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_volume(client_obj, vol_name)
+
+ elif state == "restore":
+ return_status, changed, msg, changed_attrs_dict, resp = restore_volume(client_obj, vol_name, snapshot)
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_volume_collection.py b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_volume_collection.py
new file mode 100644
index 00000000..ff91f016
--- /dev/null
+++ b/ansible_collections/hpe/nimble/plugins/modules/hpe_nimble_volume_collection.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+
+# Copyright 2020 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+# OF ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+# author Alok Ranjan (alok.ranjan2@hpe.com)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
+description: Manage the volume collections on an HPE Nimble Storage group.
+module: hpe_nimble_volume_collection
+options:
+ abort_handover:
+ required: False
+ type: bool
+ description:
+ - Abort in-progress handover. If for some reason a previously invoked handover request is unable to complete, this action can be used to cancel it.
+ This operation is not supported for synchronous replication volume collections.
+ agent_hostname:
+ required: False
+ type: str
+ description:
+ - Generic backup agent hostname.
+ agent_password:
+ required: False
+ type: str
+ description:
+ - Generic backup agent password.
+ agent_username:
+ required: False
+ type: str
+ description:
+ - Generic backup agent username.
+ app_cluster:
+ required: False
+ type: str
+ description:
+ - If the application is running within a Windows cluster environment, this is the cluster name.
+ app_id:
+ required: False
+ choices:
+ - inval
+ - exchange
+ - exchange_dag
+ - hyperv
+ - sql2005
+ - sql2008
+ - sql2012
+ - sql2014
+ - sql2016
+ - sql2017
+ type: str
+ description:
+ - Application ID running on the server.
+ app_server:
+ required: False
+ type: str
+ description:
+ - Application server hostname.
+ app_service:
+ required: False
+ type: str
+ description:
+    - If the application is running within a Windows cluster environment, then this is the instance name of the service running within the cluster environment.
+ app_sync:
+ choices:
+ - none
+ - vss
+ - vmware
+ - generic
+ required: False
+ type: str
+ description:
+ - Application synchronization.
+ change_name:
+ required: False
+ type: str
+ description:
+ - Change name of the existing volume collection.
+ demote:
+ required: False
+ type: bool
+ description:
+ - Release ownership of the specified volume collection. The volumes associated with the volume collection will be set to offline and
+ a snapshot will be created, then full control over the volume collection will be transferred to the new owner. This option can be used
+ following a promote to revert the volume collection back to its prior configured state. This operation does not alter the configuration on
+ the new owner itself, but does require the new owner to be running in order to obtain its identity information. This operation is not supported
+ for synchronous replication volume collections.
+ description:
+ required: False
+ type: str
+ description:
+ - Text description of volume collection.
+ handover:
+ required: False
+ type: bool
+ description:
+ - Gracefully transfer ownership of the specified volume collection. This action can be used to pass control of the volume collection
+ to the downstream replication partner. Ownership and full control over the volume collection will be given to the downstream replication
+ partner. The volumes associated with the volume collection will be set to offline prior to the final snapshot being taken and replicated,
+ thus ensuring full data synchronization as part of the transfer. By default, the new owner will automatically begin replicating the volume
+ collection back to this node when the handover completes.
+ invoke_on_upstream_partner:
+ required: False
+ type: bool
+ description:
+ - Invoke handover request on upstream partner. This operation is not supported for synchronous replication volume collections.
+ is_standalone_volcoll:
+ required: False
+ type: bool
+ description:
+ - Indicates whether this is a standalone volume collection.
+ metadata:
+ required: False
+ type: dict
+ description:
+ - User defined key-value pairs that augment a volume collection attributes. List of key-value pairs. Keys must be unique and non-empty.
+ When creating an object, values must be non-empty. When updating an object, an empty value causes the corresponding key to be removed.
+ name:
+ required: True
+ type: str
+ description:
+ - Name of the volume collection.
+ no_reverse:
+ required: False
+ type: bool
+ description:
+ - Do not automatically reverse direction of replication.
+ Using this argument will prevent the new owner from automatically replicating the volume collection to this node when the handover completes.
+ override_upstream_down:
+ required: False
+ type: bool
+ description:
+ - Allow the handover request to proceed even if upstream array is down. The default behavior is to return an error when upstream is down.
+ This option is applicable for synchronous replication only.
+ promote:
+ required: False
+ type: bool
+ description:
+ - Take ownership of the specified volume collection. The volumes associated with the volume collection will be set to online and be
+ available for reading and writing. Replication will be disabled on the affected schedules and must be re-configured if desired. Snapshot
+ retention for the affected schedules will be set to the greater of the current local or replica retention values. This operation is not
+ supported for synchronous replication volume collections.
+ prot_template:
+ required: False
+ type: str
+ description:
+ - Name of the protection template whose attributes will be used to create this volume collection.
+ This attribute is only used for input when creating a volume collection and is not outputted.
+ replication_partner:
+ required: False
+ type: str
+ description:
+ - Name of the new volume collection owner.
+ replication_type:
+ choices:
+ - periodic_snapshot
+ - synchronous
+ required: False
+ type: str
+ description:
+ - Type of replication configured for the volume collection.
+ state:
+ required: True
+ choices:
+ - present
+ - absent
+ - create
+ type: str
+ description:
+ - The volume collection operations.
+ validate:
+ required: False
+ type: bool
+ description:
+ - Validate a volume collection with either Microsoft VSS or VMware application synchronization.
+ vcenter_hostname:
+ required: False
+ type: str
+ description:
+ - VMware vCenter hostname.
+ vcenter_username:
+ required: False
+ type: str
+ description:
+ - Application VMware vCenter username. String of up to 80 alphanumeric characters, beginning with a letter.
+      It can include the at sign (@), backslash (\), dash (-), period (.), and underscore (_).
+ vcenter_password:
+ required: False
+ type: str
+ description:
+ - Application VMware vCenter password. A password with few constraints.
+extends_documentation_fragment: hpe.nimble.hpe_nimble
+short_description: Manage the HPE Nimble Storage volume collections
+version_added: "1.0.0"
+notes:
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+
+# If state is "create", then create a volcoll if not present. Fails if already present.
+# If state is "present", then create a volcoll if not present. Succeeds if it already exists.
+- name: Create volume collection if not present
+ hpe.nimble.hpe_nimble_volume_collection:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+    description: "{{ description | default(None) }}"
+ state: "{{ state | default('present') }}"
+
+- name: Delete volume collection
+ hpe.nimble.hpe_nimble_volume_collection:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: absent
+
+- name: Promote volume collection
+ hpe.nimble.hpe_nimble_volume_collection:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: present
+ promote: True
+
+- name: Demote volume collection
+ hpe.nimble.hpe_nimble_volume_collection:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: present
+ demote: True
+
+# Handover requires a valid replication partner (see the handover branch in main()).
+- name: Handover volume collection
+  hpe.nimble.hpe_nimble_volume_collection:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    replication_partner: "{{ replication_partner | mandatory }}"
+    state: present
+    handover: True
+
+- name: Abort handover volume collection
+ hpe.nimble.hpe_nimble_volume_collection:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: present
+ abort_handover: True
+
+- name: Validate volume collection
+ hpe.nimble.hpe_nimble_volume_collection:
+ host: "{{ host }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "{{ name }}"
+ state: present
+ validate: True
+
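+# A minimal sketch of a handover that does not automatically reverse the
+# replication direction; replication_partner is assumed to name an existing
+# downstream partner.
+- name: Handover volume collection without reverse replication
+  hpe.nimble.hpe_nimble_volume_collection:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    replication_partner: "{{ replication_partner | mandatory }}"
+    no_reverse: True
+    state: present
+    handover: True
+
+# A minimal sketch of attaching user-defined metadata; the keys shown are
+# hypothetical. Per the metadata option, an empty value in an update removes
+# the corresponding key.
+- name: Update volume collection metadata
+  hpe.nimble.hpe_nimble_volume_collection:
+    host: "{{ host }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    name: "{{ name }}"
+    metadata: "{{ metadata }}"  # metadata = {'mykey1': 'myval1', 'mykey2': 'myval2'}
+    state: present
+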
+'''
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from nimbleclient.v1 import client
+except ImportError:
+ client = None
+from ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble import __version__ as NIMBLE_ANSIBLE_VERSION
+import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
+
+
+def create_volcoll(
+ client_obj,
+ volcoll_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(volcoll_name):
+        return (False, False, "Create volume collection failed as volume collection name is null.", {}, {})
+ try:
+ volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
+ if utils.is_null_or_empty(volcoll_resp):
+ params = utils.remove_null_args(**kwargs)
+ volcoll_resp = client_obj.volume_collections.create(name=volcoll_name, **params)
+ return (True, True, f"Created volume collection '{volcoll_name}' successfully.", {}, volcoll_resp.attrs)
+ else:
+ return (False, False, f"Volume collection '{volcoll_name}' cannot be created as it is already present in given state.", {}, {})
+ except Exception as ex:
+ return (False, False, f"Volume collection creation failed | {ex}", {}, {})
+
+
+def update_volcoll(
+ client_obj,
+ volcoll_resp,
+ **kwargs):
+
+ if utils.is_null_or_empty(volcoll_resp):
+ return (False, False, "Update volume collection failed as volume collection is not present.", {}, {})
+ try:
+ volcoll_name = volcoll_resp.attrs.get("name")
+ changed_attrs_dict, params = utils.remove_unchanged_or_null_args(volcoll_resp, **kwargs)
+        if len(changed_attrs_dict) > 0:
+ volcoll_resp = client_obj.volume_collections.update(id=volcoll_resp.attrs.get("id"), **params)
+ return (True, True, f"Volume collection '{volcoll_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
+ changed_attrs_dict, volcoll_resp.attrs)
+ else:
+ return (True, False, f"Volume collection '{volcoll_name}' already present in given state.", {}, volcoll_resp.attrs)
+ except Exception as ex:
+ return (False, False, f"Volume collection update failed | {ex}", {}, {})
+
+
+def delete_volcoll(client_obj, volcoll_name):
+
+ if utils.is_null_or_empty(volcoll_name):
+ return (False, False, "Delete volume collection failed as volume collection name is null.", {})
+
+ try:
+ volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
+ if utils.is_null_or_empty(volcoll_resp):
+ return (False, False, f"Volume collection '{volcoll_name}' not present to delete.", {})
+        else:
+            client_obj.volume_collections.delete(id=volcoll_resp.attrs.get("id"))
+ return (True, True, f"Deleted volume collection '{volcoll_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Volume collection deletion failed | {ex}", {})
+
+
+def promote_volcoll(client_obj, volcoll_name):
+
+ if utils.is_null_or_empty(volcoll_name):
+ return (False, False, "Promote volume collection failed as volume collection name is null.", {})
+
+ try:
+ volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
+ if utils.is_null_or_empty(volcoll_resp):
+ return (False, False, f"Volume collection '{volcoll_name}' not present to promote.", {})
+ else:
+ client_obj.volume_collections.promote(id=volcoll_resp.attrs.get("id"))
+ return (True, True, f"Promoted volume collection '{volcoll_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Promote volume collection failed | {ex}", {})
+
+
+def demote_volcoll(
+ client_obj,
+ volcoll_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(volcoll_name):
+ return (False, False, "Demote volume collection failed as volume collection name is null.", {})
+
+ try:
+ volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
+ params = utils.remove_null_args(**kwargs)
+ if utils.is_null_or_empty(volcoll_resp):
+ return (False, False, f"Volume collection '{volcoll_name}' not present to demote.", {})
+ else:
+ client_obj.volume_collections.demote(id=volcoll_resp.attrs.get("id"), **params)
+ return (True, True, f"Demoted volume collection '{volcoll_name}' successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Demote volume collection failed | {ex}", {})
+
+
+def handover_volcoll(
+ client_obj,
+ volcoll_name,
+ **kwargs):
+
+ if utils.is_null_or_empty(volcoll_name):
+ return (False, False, "Handover of volume collection failed as volume collection name is null.", {})
+
+ try:
+ volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
+ params = utils.remove_null_args(**kwargs)
+ if utils.is_null_or_empty(volcoll_resp):
+ return (False, False, f"Volume collection '{volcoll_name}' not present for handover.", {})
+ else:
+ client_obj.volume_collections.handover(id=volcoll_resp.attrs.get("id"), **params)
+ return (True, True, f"Handover of volume collection '{volcoll_name}' done successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Handover of volume collection failed | {ex}", {})
+
+
+def abort_handover_volcoll(
+ client_obj,
+ volcoll_name):
+
+ if utils.is_null_or_empty(volcoll_name):
+ return (False, False, "Abort handover of volume collection failed as volume collection name is null.", {})
+
+ try:
+ volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
+ if utils.is_null_or_empty(volcoll_resp):
+ return (False, False, f"Volume collection '{volcoll_name}' not present for abort handover.", {})
+ else:
+ client_obj.volume_collections.abort_handover(id=volcoll_resp.attrs.get("id"))
+ return (True, True, f"Abort handover of volume collection '{volcoll_name}' done successfully.", {})
+ except Exception as ex:
+ return (False, False, f"Abort handover of volume collection failed | {ex}", {})
+
+
+def validate_volcoll(
+ client_obj,
+ volcoll_name):
+
+ if utils.is_null_or_empty(volcoll_name):
+ return (False, False, "Validate volume collection failed as volume collection name is null.", {}, {})
+
+ try:
+ volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
+ if utils.is_null_or_empty(volcoll_resp):
+ return (False, False, f"Volume collection '{volcoll_name}' not present for validation.", {}, {})
+ else:
+ volcoll_validate_resp = client_obj.volume_collections.validate(id=volcoll_resp.attrs.get("id"))
+ if hasattr(volcoll_validate_resp, 'attrs'):
+ volcoll_validate_resp = volcoll_validate_resp.attrs
+ return (True, False, f"Validation of volume collection '{volcoll_name}' done successfully.", {}, volcoll_validate_resp)
+ except Exception as ex:
+ return (False, False, f"Validation of volume collection failed | {ex}", {}, {})
+
+
+def main():
+
+ fields = {
+ "state": {
+ "required": True,
+ "choices": ['present',
+ 'absent',
+ 'create'
+ ],
+ "type": "str"
+ },
+ "prot_template": {
+ "required": False,
+ "type": "str"
+ },
+ "name": {
+ "required": True,
+ "type": "str"
+ },
+ "change_name": {
+ "required": False,
+ "type": "str"
+ },
+ "description": {
+ "required": False,
+ "type": "str"
+ },
+ "replication_type": {
+ "choices": ['periodic_snapshot', 'synchronous'],
+ "required": False,
+ "type": "str"
+ },
+ "app_sync": {
+ "choices": ['none', 'vss', 'vmware', 'generic'],
+ "required": False,
+ "type": "str"
+ },
+ "app_server": {
+ "required": False,
+ "type": "str"
+ },
+ "app_id": {
+ "required": False,
+ "choices": ['inval', 'exchange', 'exchange_dag', 'hyperv', 'sql2005', 'sql2008', 'sql2012', 'sql2014', 'sql2016', 'sql2017'],
+ "type": "str"
+ },
+ "app_cluster": {
+ "required": False,
+ "type": "str"
+ },
+ "app_service": {
+ "required": False,
+ "type": "str"
+ },
+ "vcenter_hostname": {
+ "required": False,
+ "type": "str"
+ },
+ "vcenter_username": {
+ "required": False,
+ "type": "str"
+ },
+ "vcenter_password": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "agent_hostname": {
+ "required": False,
+ "type": "str"
+ },
+ "agent_username": {
+ "required": False,
+ "type": "str"
+ },
+ "agent_password": {
+ "required": False,
+ "type": "str",
+ "no_log": True
+ },
+ "is_standalone_volcoll": {
+ "required": False,
+ "type": "bool"
+ },
+ "metadata": {
+ "required": False,
+ "type": "dict"
+ },
+ "promote": {
+ "required": False,
+ "type": "bool"
+ },
+ "demote": {
+ "required": False,
+ "type": "bool"
+ },
+ "handover": {
+ "required": False,
+ "type": "bool"
+ },
+ "abort_handover": {
+ "required": False,
+ "type": "bool"
+ },
+ "validate": {
+ "required": False,
+ "type": "bool"
+ },
+ "replication_partner": {
+ "required": False,
+ "type": "str"
+ },
+ "invoke_on_upstream_partner": {
+ "required": False,
+ "type": "bool"
+ },
+ "no_reverse": {
+ "required": False,
+ "type": "bool"
+ },
+ "override_upstream_down": {
+ "required": False,
+ "type": "bool"
+ }
+ }
+ default_fields = utils.basic_auth_arg_fields()
+ fields.update(default_fields)
+ module = AnsibleModule(argument_spec=fields)
+ if client is None:
+ module.fail_json(msg='Python nimble-sdk could not be found.')
+
+ hostname = module.params["host"]
+ username = module.params["username"]
+ password = module.params["password"]
+ state = module.params["state"]
+ prot_template = module.params["prot_template"]
+ volcoll_name = module.params["name"]
+ change_name = module.params["change_name"]
+ description = module.params["description"]
+ replication_type = module.params["replication_type"]
+ app_sync = module.params["app_sync"]
+ app_server = module.params["app_server"]
+ app_id = module.params["app_id"]
+ app_cluster = module.params["app_cluster"]
+ app_service = module.params["app_service"]
+ vcenter_hostname = module.params["vcenter_hostname"]
+ vcenter_username = module.params["vcenter_username"]
+ vcenter_password = module.params["vcenter_password"]
+ agent_hostname = module.params["agent_hostname"]
+ agent_username = module.params["agent_username"]
+ agent_password = module.params["agent_password"]
+ is_standalone_volcoll = module.params["is_standalone_volcoll"]
+ metadata = module.params["metadata"]
+ promote = module.params["promote"]
+ demote = module.params["demote"]
+ handover = module.params["handover"]
+ abort_handover = module.params["abort_handover"]
+ validate = module.params["validate"]
+ replication_partner = module.params["replication_partner"]
+ invoke_on_upstream_partner = module.params["invoke_on_upstream_partner"]
+ no_reverse = module.params["no_reverse"]
+ override_upstream_down = module.params["override_upstream_down"]
+
+ if (username is None or password is None or hostname is None):
+        module.fail_json(msg="Missing variables: hostname, username, and password are mandatory.")
+
+ # defaults
+ return_status = changed = False
+ msg = "No task to run."
+ resp = None
+
+ try:
+ client_obj = client.NimOSClient(
+ hostname,
+ username,
+ password,
+ f"HPE Nimble Ansible Modules v{NIMBLE_ANSIBLE_VERSION}"
+ )
+
+ # States.
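+        # Dispatch summary (mirrors the branches below):
+        #   present + promote/demote/handover/abort_handover/validate -> the
+        #       matching *_volcoll() action
+        #   create/present (no action flag) -> create_volcoll() or update_volcoll()
+        #   absent -> delete_volcoll()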
+ if state == 'present' and promote is True:
+ return_status, changed, msg, changed_attrs_dict = promote_volcoll(client_obj, volcoll_name)
+
+ elif state == 'present' and demote is True:
+ return_status, changed, msg, changed_attrs_dict = demote_volcoll(
+ client_obj,
+ volcoll_name,
+ invoke_on_upstream_partner=invoke_on_upstream_partner,
+ replication_partner_id=utils.get_replication_partner_id(client_obj, replication_partner))
+
+ elif state == 'present' and handover is True:
+ replication_partner_id = utils.get_replication_partner_id(client_obj, replication_partner)
+ if utils.is_null_or_empty(replication_partner_id) is True:
+ module.fail_json(msg="Handover for volume collection failed. Please provide a valid replication partner.")
+
+ return_status, changed, msg, changed_attrs_dict = handover_volcoll(
+ client_obj,
+ volcoll_name,
+ invoke_on_upstream_partner=invoke_on_upstream_partner,
+ no_reverse=no_reverse,
+ override_upstream_down=override_upstream_down,
+ replication_partner_id=replication_partner_id)
+
+ elif state == 'present' and abort_handover is True:
+ return_status, changed, msg, changed_attrs_dict = abort_handover_volcoll(client_obj, volcoll_name)
+
+ elif state == 'present' and validate is True:
+ return_status, changed, msg, changed_attrs_dict, resp = validate_volcoll(client_obj, volcoll_name)
+
+ elif ((promote is None or promote is False)
+ and (demote is None or demote is False)
+ and (abort_handover is None or abort_handover is False)
+ and (handover is None or handover is False)
+ and (validate is None or validate is False)
+ and (state == "create" or state == "present")):
+
+ volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
+ if utils.is_null_or_empty(volcoll_resp) or state == "create":
+ return_status, changed, msg, changed_attrs_dict, resp = create_volcoll(
+ client_obj,
+ volcoll_name,
+ prottmpl_id=utils.get_prottmpl_id(client_obj, prot_template),
+ description=description,
+ replication_type=replication_type,
+ app_sync=app_sync,
+ app_server=app_server,
+ app_id=app_id,
+ app_cluster=app_cluster,
+ app_service=app_service,
+ vcenter_hostname=vcenter_hostname,
+ vcenter_username=vcenter_username,
+ vcenter_password=vcenter_password,
+ agent_hostname=agent_hostname,
+ agent_username=agent_username,
+ agent_password=agent_password,
+ is_standalone_volcoll=is_standalone_volcoll,
+ metadata=metadata)
+ else:
+ # update op
+ return_status, changed, msg, changed_attrs_dict, resp = update_volcoll(
+ client_obj,
+ volcoll_resp,
+ name=change_name,
+ description=description,
+ app_sync=app_sync,
+ app_server=app_server,
+ app_id=app_id,
+ app_cluster=app_cluster,
+ app_service=app_service,
+ vcenter_hostname=vcenter_hostname,
+ vcenter_username=vcenter_username,
+ vcenter_password=vcenter_password,
+ agent_hostname=agent_hostname,
+ agent_username=agent_username,
+ agent_password=agent_password,
+ metadata=metadata)
+
+ elif state == "absent":
+ return_status, changed, msg, changed_attrs_dict = delete_volcoll(client_obj, volcoll_name)
+
+ except Exception as ex:
+ # failed for some reason.
+ msg = str(ex)
+
+ if return_status:
+ if utils.is_null_or_empty(resp):
+ module.exit_json(return_status=return_status, changed=changed, msg=msg)
+ else:
+ module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
+ else:
+ module.fail_json(return_status=return_status, changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()